Repository: fledge-iot/fledge Branch: develop Commit: 4e34f009d742 Files: 2685 Total size: 13.7 MB Directory structure: gitextract_gasud238/ ├── .cursor/ │ ├── rules/ │ │ ├── C/ │ │ │ ├── core.mdc │ │ │ └── plugins/ │ │ │ ├── filter.mdc │ │ │ ├── north.mdc │ │ │ └── south.mdc │ │ ├── README.md │ │ ├── docs.mdc │ │ ├── python/ │ │ │ ├── api.mdc │ │ │ ├── config.mdc │ │ │ ├── core.mdc │ │ │ └── quality.mdc │ │ └── tests/ │ │ └── python/ │ │ ├── api.mdc │ │ └── unit.mdc │ └── services/ │ ├── notification.mdc │ └── notification_code_review.mdc ├── .github/ │ ├── ISSUE_TEMPLATE/ │ │ ├── bug_report.yml │ │ ├── config.yml │ │ ├── doc_issue.yml │ │ └── feature_request.yml │ └── workflows/ │ └── checker.yml ├── .gitignore ├── .readthedocs.yaml ├── ADOPTERS.MD ├── C/ │ ├── common/ │ │ ├── CMakeLists.txt │ │ ├── JSONPath.cpp │ │ ├── acl.cpp │ │ ├── aggregate.cpp │ │ ├── asset_tracking.cpp │ │ ├── audit_logger.cpp │ │ ├── base64databuffer.cpp │ │ ├── base64image.cpp │ │ ├── bearer_token.cpp │ │ ├── config_category.cpp │ │ ├── cryptography_utils.cpp │ │ ├── databuffer.cpp │ │ ├── datapoint.cpp │ │ ├── datapoint_utility.cpp │ │ ├── file_utils.cpp │ │ ├── filter_pipeline.cpp │ │ ├── filter_plugin.cpp │ │ ├── form_data.cpp │ │ ├── image.cpp │ │ ├── include/ │ │ │ ├── JSONPath.h │ │ │ ├── acl.h │ │ │ ├── aggregate.h │ │ │ ├── asset_tracking.h │ │ │ ├── audit_logger.h │ │ │ ├── base64.h │ │ │ ├── base64databuffer.h │ │ │ ├── base64dpimage.h │ │ │ ├── bearer_token.h │ │ │ ├── config_category.h │ │ │ ├── cryptography_utils.h │ │ │ ├── databuffer.h │ │ │ ├── datapoint.h │ │ │ ├── datapoint_utility.h │ │ │ ├── dpimage.h │ │ │ ├── expression.h │ │ │ ├── exprtk.hpp │ │ │ ├── file_utils.h │ │ │ ├── filter_pipeline.h │ │ │ ├── filter_plugin.h │ │ │ ├── form_data.h │ │ │ ├── insert.h │ │ │ ├── join.h │ │ │ ├── json_properties.h │ │ │ ├── json_provider.h │ │ │ ├── json_utils.h │ │ │ ├── logger.h │ │ │ ├── management_client.h │ │ │ ├── pipeline_debugger.h │ │ │ ├── pipeline_element.h │ │ │ ├── 
plugin_data.h │ │ │ ├── process.h │ │ │ ├── purge_result.h │ │ │ ├── pyruntime.h │ │ │ ├── pythonconfigcategory.h │ │ │ ├── pythonreading.h │ │ │ ├── pythonreadingset.h │ │ │ ├── query.h │ │ │ ├── reading.h │ │ │ ├── reading_circularbuffer.h │ │ │ ├── reading_set.h │ │ │ ├── reading_stream.h │ │ │ ├── readingset_circularbuffer.h │ │ │ ├── resultset.h │ │ │ ├── returns.h │ │ │ ├── service_record.h │ │ │ ├── sort.h │ │ │ ├── storage_client.h │ │ │ ├── string_utils.h │ │ │ ├── timebucket.h │ │ │ ├── update_modifier.h │ │ │ ├── utils.h │ │ │ ├── value.h │ │ │ └── where.h │ │ ├── join.cpp │ │ ├── json_utils.cpp │ │ ├── logger.cpp │ │ ├── management_client.cpp │ │ ├── pipeline_branch.cpp │ │ ├── pipeline_debugger.cpp │ │ ├── pipeline_element.cpp │ │ ├── pipeline_filter.cpp │ │ ├── pipeline_writer.cpp │ │ ├── plugin_data.cpp │ │ ├── process.cpp │ │ ├── purge_result.cpp │ │ ├── pyexception.cpp │ │ ├── pyruntime.cpp │ │ ├── pythonconfigcategory.cpp │ │ ├── pythonreading.cpp │ │ ├── pythonreadingset.cpp │ │ ├── query.cpp │ │ ├── reading.cpp │ │ ├── reading_circularbuffer.cpp │ │ ├── reading_set.cpp │ │ ├── readingset_circularbuffer.cpp │ │ ├── result_set.cpp │ │ ├── service_record.cpp │ │ ├── storage_client.cpp │ │ ├── string_utils.cpp │ │ └── where.cpp │ ├── plugins/ │ │ ├── common/ │ │ │ ├── CMakeLists.txt │ │ │ ├── http_sender.cpp │ │ │ ├── include/ │ │ │ │ ├── http_sender.h │ │ │ │ ├── libcurl_https.h │ │ │ │ ├── piwebapi.h │ │ │ │ ├── simple_http.h │ │ │ │ └── simple_https.h │ │ │ ├── libcurl_https.cpp │ │ │ ├── piwebapi.cpp │ │ │ ├── simple_http.cpp │ │ │ └── simple_https.cpp │ │ ├── filter/ │ │ │ └── common/ │ │ │ ├── CMakeLists.txt │ │ │ ├── filter.cpp │ │ │ └── include/ │ │ │ └── filter.h │ │ ├── north/ │ │ │ └── OMF/ │ │ │ ├── CMakeLists.txt │ │ │ ├── OMFError.cpp │ │ │ ├── include/ │ │ │ │ ├── OMFHint.h │ │ │ │ ├── basetypes.h │ │ │ │ ├── linkedlookup.h │ │ │ │ ├── ocs.h │ │ │ │ ├── omf.h │ │ │ │ ├── omfbuffer.h │ │ │ │ ├── omferror.h │ │ │ │ ├── omfinfo.h │ │ │ │ 
└── omflinkeddata.h │ │ │ ├── linkdata.cpp │ │ │ ├── ocs.cpp │ │ │ ├── omf.cpp │ │ │ ├── omfbuffer.cpp │ │ │ ├── omfhints.cpp │ │ │ ├── omfinfo.cpp │ │ │ └── plugin.cpp │ │ ├── storage/ │ │ │ ├── CMakeLists.txt │ │ │ ├── README.rst │ │ │ ├── common/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── disk_monitor.cpp │ │ │ │ ├── include/ │ │ │ │ │ ├── disk_monitor.h │ │ │ │ │ └── sql_buffer.h │ │ │ │ └── sql_buffer.cpp │ │ │ ├── postgres/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── CheckRhPg.cmake │ │ │ │ ├── README.rst │ │ │ │ ├── connection.cpp │ │ │ │ ├── connection_manager.cpp │ │ │ │ ├── include/ │ │ │ │ │ ├── connection.h │ │ │ │ │ └── connection_manager.h │ │ │ │ └── plugin.cpp │ │ │ ├── sqlite/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── Findsqlite3.cmake │ │ │ │ ├── common/ │ │ │ │ │ ├── connection.cpp │ │ │ │ │ ├── connection_manager.cpp │ │ │ │ │ ├── include/ │ │ │ │ │ │ ├── connection.h │ │ │ │ │ │ ├── connection_manager.h │ │ │ │ │ │ ├── purge_configuration.h │ │ │ │ │ │ ├── readings_catalogue.h │ │ │ │ │ │ └── sqlite_common.h │ │ │ │ │ ├── purge_configuration.cpp │ │ │ │ │ ├── readings.cpp │ │ │ │ │ └── readings_catalogue.cpp │ │ │ │ ├── include/ │ │ │ │ │ ├── common.h │ │ │ │ │ └── profile.h │ │ │ │ ├── plugin.cpp │ │ │ │ └── schema/ │ │ │ │ ├── include/ │ │ │ │ │ └── schema.h │ │ │ │ └── schema.cpp │ │ │ ├── sqlitelb/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── Findsqlite3.cmake │ │ │ │ ├── common/ │ │ │ │ │ ├── connection.cpp │ │ │ │ │ ├── connection_manager.cpp │ │ │ │ │ ├── include/ │ │ │ │ │ │ ├── connection.h │ │ │ │ │ │ └── connection_manager.h │ │ │ │ │ └── readings.cpp │ │ │ │ ├── include/ │ │ │ │ │ ├── common.h │ │ │ │ │ └── profile.h │ │ │ │ └── plugin.cpp │ │ │ └── sqlitememory/ │ │ │ ├── CMakeLists.txt │ │ │ ├── Findsqlite3.cmake │ │ │ ├── connection.cpp │ │ │ ├── include/ │ │ │ │ ├── connection.h │ │ │ │ └── connection_manager.h │ │ │ └── plugin.cpp │ │ └── utils/ │ │ ├── CMakeLists.txt │ │ ├── cmdutil.cpp │ │ └── get_plugin_info.cpp │ ├── services/ │ │ ├── 
common/ │ │ │ ├── CMakeLists.txt │ │ │ ├── README.rst │ │ │ ├── config_handler.cpp │ │ │ ├── filter_python_plugin_handle.cpp │ │ │ ├── include/ │ │ │ │ ├── binary_plugin_handle.h │ │ │ │ ├── config_handler.h │ │ │ │ ├── filter_python_plugin_handle.h │ │ │ │ ├── management_api.h │ │ │ │ ├── north_python_plugin_handle.h │ │ │ │ ├── notification_python_plugin_handle.h │ │ │ │ ├── perfmonitors.h │ │ │ │ ├── plugin.h │ │ │ │ ├── plugin_api.h │ │ │ │ ├── plugin_exception.h │ │ │ │ ├── plugin_handle.h │ │ │ │ ├── plugin_manager.h │ │ │ │ ├── python_plugin_handle.h │ │ │ │ ├── service_handler.h │ │ │ │ └── south_python_plugin_handle.h │ │ │ ├── management_api.cpp │ │ │ ├── north_python_plugin_handle.cpp │ │ │ ├── notification_python_plugin_handle.cpp │ │ │ ├── perfmonitor.cpp │ │ │ ├── plugin.cpp │ │ │ ├── plugin_manager.cpp │ │ │ ├── service_security.cpp │ │ │ └── south_python_plugin_handle.cpp │ │ ├── common-plugin-interfaces/ │ │ │ └── python/ │ │ │ └── include/ │ │ │ └── python_plugin_common_interface.h │ │ ├── core/ │ │ │ ├── CMakeLists.txt │ │ │ ├── configuration_manager.cpp │ │ │ ├── core_management_api.cpp │ │ │ ├── include/ │ │ │ │ ├── configuration_manager.h │ │ │ │ ├── core_management_api.h │ │ │ │ └── service_registry.h │ │ │ └── service_registry.cpp │ │ ├── filter-plugin-interfaces/ │ │ │ └── python/ │ │ │ ├── CMakeLists.txt │ │ │ ├── filter_ingest_pymodule/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ └── ingest_callback_pymodule.cpp │ │ │ └── python_plugin_interface.cpp │ │ ├── north/ │ │ │ ├── CMakeLists.txt │ │ │ ├── README.rst │ │ │ ├── data_load.cpp │ │ │ ├── data_send.cpp │ │ │ ├── include/ │ │ │ │ ├── data_load.h │ │ │ │ ├── data_sender.h │ │ │ │ ├── defaults.h │ │ │ │ ├── north_api.h │ │ │ │ ├── north_plugin.h │ │ │ │ └── north_service.h │ │ │ ├── north.cpp │ │ │ ├── north_api.cpp │ │ │ └── north_plugin.cpp │ │ ├── north-plugin-interfaces/ │ │ │ └── python/ │ │ │ ├── CMakeLists.txt │ │ │ └── python_plugin_interface.cpp │ │ ├── notification-plugin-interfaces/ │ 
│ │ └── python/ │ │ │ ├── CMakeLists.txt │ │ │ └── python_plugin_interface.cpp │ │ ├── south/ │ │ │ ├── CMakeLists.txt │ │ │ ├── README.rst │ │ │ ├── include/ │ │ │ │ ├── defaults.h │ │ │ │ ├── ingest.h │ │ │ │ ├── ingest_rate.h │ │ │ │ ├── south_api.h │ │ │ │ ├── south_plugin.h │ │ │ │ └── south_service.h │ │ │ ├── ingest.cpp │ │ │ ├── ingestRate.cpp │ │ │ ├── south.cpp │ │ │ ├── south_api.cpp │ │ │ └── south_plugin.cpp │ │ ├── south-plugin-interfaces/ │ │ │ └── python/ │ │ │ ├── CMakeLists.txt │ │ │ ├── async_ingest_pymodule/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ └── ingest_callback_pymodule.cpp │ │ │ └── python_plugin_interface.cpp │ │ └── storage/ │ │ ├── CMakeLists.txt │ │ ├── README.rst │ │ ├── configuration.cpp │ │ ├── include/ │ │ │ ├── configuration.h │ │ │ ├── plugin_configuration.h │ │ │ ├── storage_api.h │ │ │ ├── storage_plugin.h │ │ │ ├── storage_registry.h │ │ │ ├── storage_service.h │ │ │ ├── storage_stats.h │ │ │ └── stream_handler.h │ │ ├── pluginconfiguration.cpp │ │ ├── storage │ │ ├── storage.cpp │ │ ├── storage_api.cpp │ │ ├── storage_plugin.cpp │ │ ├── storage_registry.cpp │ │ ├── storage_stats.cpp │ │ └── stream_handler.cpp │ ├── tasks/ │ │ ├── check_updates/ │ │ │ ├── CMakeLists.txt │ │ │ ├── check_updates.cpp │ │ │ ├── include/ │ │ │ │ └── check_updates.h │ │ │ └── main.cpp │ │ ├── north/ │ │ │ ├── CMakeLists.txt │ │ │ └── sending_process/ │ │ │ ├── CMakeLists.txt │ │ │ ├── include/ │ │ │ │ ├── north_filter_pipeline.h │ │ │ │ ├── north_plugin.h │ │ │ │ └── sending.h │ │ │ ├── north_filter_pipeline.cpp │ │ │ ├── north_plugin.cpp │ │ │ ├── sending.cpp │ │ │ └── sending_process.cpp │ │ ├── purge_system/ │ │ │ ├── CMakeLists.txt │ │ │ ├── include/ │ │ │ │ └── purge_system.h │ │ │ ├── main.cpp │ │ │ └── purge_system.cpp │ │ └── statistics_history/ │ │ ├── CMakeLists.txt │ │ ├── include/ │ │ │ └── stats_history.h │ │ ├── main.cpp │ │ └── stats_history.cpp │ └── thirdparty/ │ ├── Simple-Web-Server/ │ │ ├── CMakeLists.txt │ │ ├── LICENSE │ │ ├── 
README.md │ │ ├── asio_compatibility.hpp │ │ ├── client_http.hpp │ │ ├── client_https.hpp │ │ ├── crypto.hpp │ │ ├── docs/ │ │ │ ├── Doxyfile │ │ │ └── benchmarks.md │ │ ├── http_examples.cpp │ │ ├── https_examples.cpp │ │ ├── mutex.hpp │ │ ├── server_http.hpp │ │ ├── server_https.hpp │ │ ├── status_code.hpp │ │ ├── tests/ │ │ │ ├── CMakeLists.txt │ │ │ ├── assert.hpp │ │ │ ├── crypto_test.cpp │ │ │ ├── io_test.cpp │ │ │ ├── parse_test.cpp │ │ │ └── status_code_test.cpp │ │ ├── utility.hpp │ │ └── web/ │ │ ├── index.html │ │ └── test.html │ └── rapidjson/ │ ├── .gitattributes │ ├── .gitignore │ ├── .gitmodules │ ├── .travis.yml │ ├── CHANGELOG.md │ ├── CMakeLists.txt │ ├── CMakeModules/ │ │ └── FindGTestSrc.cmake │ ├── RapidJSON.pc.in │ ├── RapidJSONConfig.cmake.in │ ├── RapidJSONConfigVersion.cmake.in │ ├── appveyor.yml │ ├── bin/ │ │ ├── data/ │ │ │ ├── glossary.json │ │ │ ├── menu.json │ │ │ ├── readme.txt │ │ │ ├── sample.json │ │ │ ├── webapp.json │ │ │ └── widget.json │ │ ├── encodings/ │ │ │ ├── utf16be.json │ │ │ ├── utf16bebom.json │ │ │ ├── utf16le.json │ │ │ ├── utf16lebom.json │ │ │ ├── utf32be.json │ │ │ ├── utf32bebom.json │ │ │ ├── utf32le.json │ │ │ ├── utf32lebom.json │ │ │ ├── utf8.json │ │ │ └── utf8bom.json │ │ ├── jsonchecker/ │ │ │ ├── fail1.json │ │ │ ├── fail10.json │ │ │ ├── fail11.json │ │ │ ├── fail12.json │ │ │ ├── fail13.json │ │ │ ├── fail14.json │ │ │ ├── fail15.json │ │ │ ├── fail16.json │ │ │ ├── fail17.json │ │ │ ├── fail18.json │ │ │ ├── fail19.json │ │ │ ├── fail2.json │ │ │ ├── fail20.json │ │ │ ├── fail21.json │ │ │ ├── fail22.json │ │ │ ├── fail23.json │ │ │ ├── fail24.json │ │ │ ├── fail25.json │ │ │ ├── fail26.json │ │ │ ├── fail27.json │ │ │ ├── fail28.json │ │ │ ├── fail29.json │ │ │ ├── fail3.json │ │ │ ├── fail30.json │ │ │ ├── fail31.json │ │ │ ├── fail32.json │ │ │ ├── fail33.json │ │ │ ├── fail4.json │ │ │ ├── fail5.json │ │ │ ├── fail6.json │ │ │ ├── fail7.json │ │ │ ├── fail8.json │ │ │ ├── fail9.json │ │ │ ├── 
pass1.json │ │ │ ├── pass2.json │ │ │ ├── pass3.json │ │ │ └── readme.txt │ │ └── types/ │ │ ├── booleans.json │ │ ├── floats.json │ │ ├── guids.json │ │ ├── integers.json │ │ ├── mixed.json │ │ ├── nulls.json │ │ ├── paragraphs.json │ │ └── readme.txt │ ├── contrib/ │ │ └── natvis/ │ │ ├── LICENSE │ │ ├── README.md │ │ └── rapidjson.natvis │ ├── doc/ │ │ ├── CMakeLists.txt │ │ ├── Doxyfile.in │ │ ├── Doxyfile.zh-cn.in │ │ ├── diagram/ │ │ │ ├── architecture.dot │ │ │ ├── insituparsing.dot │ │ │ ├── iterative-parser-states-diagram.dot │ │ │ ├── move1.dot │ │ │ ├── move2.dot │ │ │ ├── move3.dot │ │ │ ├── normalparsing.dot │ │ │ ├── simpledom.dot │ │ │ ├── tutorial.dot │ │ │ └── utilityclass.dot │ │ ├── dom.md │ │ ├── dom.zh-cn.md │ │ ├── encoding.md │ │ ├── encoding.zh-cn.md │ │ ├── faq.md │ │ ├── faq.zh-cn.md │ │ ├── features.md │ │ ├── features.zh-cn.md │ │ ├── internals.md │ │ ├── internals.zh-cn.md │ │ ├── misc/ │ │ │ ├── DoxygenLayout.xml │ │ │ ├── doxygenextra.css │ │ │ ├── footer.html │ │ │ └── header.html │ │ ├── npm.md │ │ ├── performance.md │ │ ├── performance.zh-cn.md │ │ ├── pointer.md │ │ ├── pointer.zh-cn.md │ │ ├── sax.md │ │ ├── sax.zh-cn.md │ │ ├── schema.md │ │ ├── schema.zh-cn.md │ │ ├── stream.md │ │ ├── stream.zh-cn.md │ │ ├── tutorial.md │ │ └── tutorial.zh-cn.md │ ├── example/ │ │ ├── CMakeLists.txt │ │ ├── capitalize/ │ │ │ └── capitalize.cpp │ │ ├── condense/ │ │ │ └── condense.cpp │ │ ├── filterkey/ │ │ │ └── filterkey.cpp │ │ ├── filterkeydom/ │ │ │ └── filterkeydom.cpp │ │ ├── jsonx/ │ │ │ └── jsonx.cpp │ │ ├── lookaheadparser/ │ │ │ └── lookaheadparser.cpp │ │ ├── messagereader/ │ │ │ └── messagereader.cpp │ │ ├── parsebyparts/ │ │ │ └── parsebyparts.cpp │ │ ├── pretty/ │ │ │ └── pretty.cpp │ │ ├── prettyauto/ │ │ │ └── prettyauto.cpp │ │ ├── schemavalidator/ │ │ │ └── schemavalidator.cpp │ │ ├── serialize/ │ │ │ └── serialize.cpp │ │ ├── simpledom/ │ │ │ └── simpledom.cpp │ │ ├── simplepullreader/ │ │ │ └── simplepullreader.cpp │ │ ├── 
simplereader/ │ │ │ └── simplereader.cpp │ │ ├── simplewriter/ │ │ │ └── simplewriter.cpp │ │ └── tutorial/ │ │ └── tutorial.cpp │ ├── include/ │ │ └── rapidjson/ │ │ ├── allocators.h │ │ ├── cursorstreamwrapper.h │ │ ├── document.h │ │ ├── encodedstream.h │ │ ├── encodings.h │ │ ├── error/ │ │ │ ├── en.h │ │ │ └── error.h │ │ ├── filereadstream.h │ │ ├── filewritestream.h │ │ ├── fwd.h │ │ ├── internal/ │ │ │ ├── biginteger.h │ │ │ ├── clzll.h │ │ │ ├── diyfp.h │ │ │ ├── dtoa.h │ │ │ ├── ieee754.h │ │ │ ├── itoa.h │ │ │ ├── meta.h │ │ │ ├── pow10.h │ │ │ ├── regex.h │ │ │ ├── stack.h │ │ │ ├── strfunc.h │ │ │ ├── strtod.h │ │ │ └── swap.h │ │ ├── istreamwrapper.h │ │ ├── memorybuffer.h │ │ ├── memorystream.h │ │ ├── msinttypes/ │ │ │ ├── inttypes.h │ │ │ └── stdint.h │ │ ├── ostreamwrapper.h │ │ ├── pointer.h │ │ ├── prettywriter.h │ │ ├── rapidjson.h │ │ ├── reader.h │ │ ├── schema.h │ │ ├── stream.h │ │ ├── stringbuffer.h │ │ ├── uri.h │ │ └── writer.h │ ├── include_dirs.js │ ├── library.json │ ├── license.txt │ ├── package.json │ ├── rapidjson.autopkg │ ├── readme.md │ ├── readme.zh-cn.md │ ├── test/ │ │ ├── CMakeLists.txt │ │ ├── perftest/ │ │ │ ├── CMakeLists.txt │ │ │ ├── misctest.cpp │ │ │ ├── perftest.cpp │ │ │ ├── perftest.h │ │ │ ├── platformtest.cpp │ │ │ ├── rapidjsontest.cpp │ │ │ └── schematest.cpp │ │ ├── unittest/ │ │ │ ├── CMakeLists.txt │ │ │ ├── allocatorstest.cpp │ │ │ ├── bigintegertest.cpp │ │ │ ├── documenttest.cpp │ │ │ ├── dtoatest.cpp │ │ │ ├── encodedstreamtest.cpp │ │ │ ├── encodingstest.cpp │ │ │ ├── filestreamtest.cpp │ │ │ ├── fwdtest.cpp │ │ │ ├── istreamwrappertest.cpp │ │ │ ├── itoatest.cpp │ │ │ ├── jsoncheckertest.cpp │ │ │ ├── namespacetest.cpp │ │ │ ├── ostreamwrappertest.cpp │ │ │ ├── pointertest.cpp │ │ │ ├── prettywritertest.cpp │ │ │ ├── readertest.cpp │ │ │ ├── regextest.cpp │ │ │ ├── schematest.cpp │ │ │ ├── simdtest.cpp │ │ │ ├── strfunctest.cpp │ │ │ ├── stringbuffertest.cpp │ │ │ ├── strtodtest.cpp │ │ │ ├── 
unittest.cpp │ │ │ ├── unittest.h │ │ │ ├── valuetest.cpp │ │ │ └── writertest.cpp │ │ └── valgrind.supp │ └── travis-doxygen.sh ├── CMakeLists.txt ├── CONTRIBUTING.md ├── GOVERNANCE.MD ├── LICENSE ├── Makefile ├── README.rst ├── SECURITY.MD ├── VERSION ├── contrib/ │ └── .gitkeep ├── data/ │ ├── etc/ │ │ └── kerberos/ │ │ └── README.rst │ └── extras/ │ └── fogbench/ │ └── fogbench_sensor_coap.template.json ├── dco-signoffs/ │ ├── AmandeepSinghArora-dco-signoff.txt │ ├── AshishJabble-dco-signoff.txt │ ├── AshwinGopalakrishnan-dco-signoff.txt │ ├── BillHunt-dco-signoff.txt │ ├── MarkRiddoch-dco-signoff.txt │ ├── MassimilianoPinto-dco-signoff.txt │ ├── MohdShariq-dco-signoff.txt │ ├── MonikaSharma-dco-signoff.txt │ ├── OriShadmon-dco-signoff.txt │ ├── PraveenGarg-dco-signoff.txt │ ├── StefanoSimonelli-dco-signoff.txt │ ├── YashTatkondawar-dco-signoff.txt │ └── other-dco-signoff.txt ├── docs/ │ ├── 91_version_history.rst │ ├── 92_downloads.rst │ ├── KERBEROS.rst │ ├── Makefile │ ├── OMF.rst │ ├── RASPBIAN.rst │ ├── _static/ │ │ ├── .gitkeep │ │ ├── theme_overrides.css │ │ └── version_menu.css │ ├── _templates/ │ │ └── breadcrumbs.html │ ├── acl.rst │ ├── build_index.rst │ ├── building_fledge/ │ │ ├── 01_introduction.rst │ │ ├── 04_installation.rst │ │ ├── 04_utilities.rst │ │ ├── 05_tasks.rst │ │ ├── 06_testing.rst │ │ ├── building_fledge.rst │ │ └── index.rst │ ├── building_pipelines.rst │ ├── check-sphinx.py │ ├── conf.py │ ├── control.rst │ ├── control_scripts.rst │ ├── fledge-north-OMF.rst │ ├── fledge-rule-DataAvailability/ │ │ └── index.rst │ ├── fledge-rule-Threshold/ │ │ └── index.rst │ ├── fledge_architecture.rst │ ├── glossary.rst │ ├── index.rst │ ├── introduction.rst │ ├── keywords/ │ │ ├── Augmentation │ │ ├── Cleansing │ │ ├── Cloud │ │ ├── Compression │ │ ├── Governance │ │ ├── Image │ │ ├── Labelling │ │ ├── MQTT │ │ ├── Mathematical │ │ ├── ModelExecution │ │ ├── Namespace │ │ ├── PLC │ │ ├── README.rst │ │ ├── Scripted │ │ ├── Signal Processing │ │ 
├── Simulation │ │ ├── Structure │ │ ├── Textual │ │ └── Vibration │ ├── make.bat │ ├── monitoring/ │ │ ├── configuration.rst │ │ ├── flow.rst │ │ ├── index.rst │ │ ├── introduction.rst │ │ ├── quality.rst │ │ ├── resources.rst │ │ └── service.rst │ ├── plugin_developers_guide/ │ │ ├── 00_source_code_doc.rst │ │ ├── 01_01_Data.rst │ │ ├── 01_Fledge_plugins.rst │ │ ├── 02_persisting_data.rst │ │ ├── 02_writing_plugins.rst │ │ ├── 035_CPP.rst │ │ ├── 037_hybrid_plugins.rst │ │ ├── 03_01_DHT11.rst │ │ ├── 03_02_Control.rst │ │ ├── 03_02_DHT11_C.rst │ │ ├── 03_02_south_python_Control.rst │ │ ├── 03_south_C_plugins.rst │ │ ├── 03_south_plugins.rst │ │ ├── 04_north_plugins.rst │ │ ├── 05_storage_plugins.rst │ │ ├── 06_filter_plugins.rst │ │ ├── 07_rules_plugins.rst │ │ ├── 08_notify_plugins.rst │ │ ├── 08_storage.rst │ │ ├── 09_packaging.rst │ │ ├── 10_testing.rst │ │ ├── 11_WSL2.rst │ │ └── index.rst │ ├── plugin_index.rst │ ├── processing_data.rst │ ├── quick_start/ │ │ ├── backup.rst │ │ ├── datasources.rst │ │ ├── gui.rst │ │ ├── index.rst │ │ ├── installing.rst │ │ ├── instructions.txt │ │ ├── north.rst │ │ ├── platforms.rst │ │ ├── starting.rst │ │ ├── support.rst │ │ ├── troubleshooting.rst │ │ ├── uninstalling.rst │ │ ├── update.rst │ │ └── viewing.rst │ ├── requirements.txt │ ├── rest_api_guide/ │ │ ├── 01_REST.rst │ │ ├── 02_RESTauthentication.rst │ │ ├── 03_RESTadmin.rst │ │ ├── 03_RESTassetTracker.rst │ │ ├── 03_RESTservices.rst │ │ ├── 03_RESTstatistics.rst │ │ ├── 03_RESTupdate.rst │ │ ├── 04_RESTuser.rst │ │ ├── 05_RESTdeveloper.rst │ │ ├── 06_GrafanaExamples.rst │ │ └── index.rst │ ├── scripts/ │ │ ├── fledge_plugin_list │ │ └── plugin_and_service_documentation │ ├── securing_fledge.rst │ ├── storage.rst │ ├── troubleshooting_pi-server_integration.rst │ └── tuning_fledge.rst ├── doxy.config ├── extras/ │ ├── python/ │ │ ├── .gitignore │ │ └── fogbench/ │ │ ├── __init__.py │ │ ├── __main__.py │ │ └── exceptions.py │ └── scripts/ │ ├── fledge.service │ └── 
setenv.sh ├── mkversion ├── python/ │ ├── .gitignore │ ├── .pylintrc │ ├── __init__.py │ ├── __template__.py │ ├── fledge/ │ │ ├── __init__.py │ │ ├── apps/ │ │ │ ├── __init__.py │ │ │ └── common/ │ │ │ ├── README.rst │ │ │ └── __init__.py │ │ ├── common/ │ │ │ ├── README.rst │ │ │ ├── __init__.py │ │ │ ├── acl_manager.py │ │ │ ├── alert_manager.py │ │ │ ├── audit_logger.py │ │ │ ├── common.py │ │ │ ├── configuration_manager.py │ │ │ ├── iprpc.py │ │ │ ├── logger.py │ │ │ ├── microservice_management_client/ │ │ │ │ ├── __init__.py │ │ │ │ ├── exceptions.py │ │ │ │ └── microservice_management_client.py │ │ │ ├── parser.py │ │ │ ├── plugin_discovery.py │ │ │ ├── plugin_helpers.py │ │ │ ├── process.py │ │ │ ├── service_record.py │ │ │ ├── statistics.py │ │ │ ├── storage_client/ │ │ │ │ ├── __init__.py │ │ │ │ ├── exceptions.py │ │ │ │ ├── payload_builder.py │ │ │ │ ├── storage_client.py │ │ │ │ └── utils.py │ │ │ ├── utils.py │ │ │ └── web/ │ │ │ ├── __init__.py │ │ │ ├── middleware.py │ │ │ └── ssl_wrapper.py │ │ ├── plugins/ │ │ │ ├── __init__.py │ │ │ ├── common/ │ │ │ │ ├── __init__.py │ │ │ │ ├── shim/ │ │ │ │ │ └── __init__.py │ │ │ │ └── utils.py │ │ │ ├── north/ │ │ │ │ ├── README.rst │ │ │ │ ├── __init__.py │ │ │ │ ├── common/ │ │ │ │ │ ├── README.rst │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── common.py │ │ │ │ │ └── exceptions.py │ │ │ │ └── empty/ │ │ │ │ ├── README.rst │ │ │ │ ├── __init__.py │ │ │ │ └── empty.py │ │ │ └── storage/ │ │ │ ├── __init__.py │ │ │ ├── common/ │ │ │ │ ├── __init__.py │ │ │ │ ├── backup.py │ │ │ │ ├── exceptions.py │ │ │ │ ├── lib.py │ │ │ │ └── restore.py │ │ │ ├── postgres/ │ │ │ │ ├── __init__.py │ │ │ │ └── backup_restore/ │ │ │ │ ├── __init__.py │ │ │ │ ├── backup_postgres.py │ │ │ │ ├── exceptions.py │ │ │ │ ├── lib.py │ │ │ │ └── restore_postgres.py │ │ │ └── sqlite/ │ │ │ ├── __init__.py │ │ │ └── backup_restore/ │ │ │ ├── __init__.py │ │ │ ├── backup_sqlite.py │ │ │ └── restore_sqlite.py │ │ ├── services/ │ │ │ ├── 
__init__.py │ │ │ ├── common/ │ │ │ │ ├── README.rst │ │ │ │ ├── __init__.py │ │ │ │ ├── microservice.py │ │ │ │ ├── microservice_management/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── routes.py │ │ │ │ ├── service_announcer.py │ │ │ │ └── utils.py │ │ │ ├── core/ │ │ │ │ ├── README.rst │ │ │ │ ├── __init__.py │ │ │ │ ├── __main__.py │ │ │ │ ├── api/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── alerts.py │ │ │ │ │ ├── asset_tracker.py │ │ │ │ │ ├── audit.py │ │ │ │ │ ├── auth.py │ │ │ │ │ ├── backup_restore.py │ │ │ │ │ ├── browser.py │ │ │ │ │ ├── certificate_store.py │ │ │ │ │ ├── common.py │ │ │ │ │ ├── configuration.py │ │ │ │ │ ├── control_service/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── acl_management.py │ │ │ │ │ │ ├── entrypoint.py │ │ │ │ │ │ ├── exceptions.py │ │ │ │ │ │ ├── pipeline.py │ │ │ │ │ │ └── script_management.py │ │ │ │ │ ├── exceptions.py │ │ │ │ │ ├── filters.py │ │ │ │ │ ├── health.py │ │ │ │ │ ├── north.py │ │ │ │ │ ├── notification.py │ │ │ │ │ ├── package_log.py │ │ │ │ │ ├── performance_monitor.py │ │ │ │ │ ├── pipeline_debugger.py │ │ │ │ │ ├── plugins/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── common.py │ │ │ │ │ │ ├── config_validator.py │ │ │ │ │ │ ├── data.py │ │ │ │ │ │ ├── discovery.py │ │ │ │ │ │ ├── exceptions.py │ │ │ │ │ │ ├── install.py │ │ │ │ │ │ ├── remove.py │ │ │ │ │ │ └── update.py │ │ │ │ │ ├── python_packages.py │ │ │ │ │ ├── repos/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── configure.py │ │ │ │ │ ├── scheduler.py │ │ │ │ │ ├── service.py │ │ │ │ │ ├── snapshot/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── plugins.py │ │ │ │ │ │ └── table.py │ │ │ │ │ ├── south.py │ │ │ │ │ ├── statistics.py │ │ │ │ │ ├── support.py │ │ │ │ │ ├── task.py │ │ │ │ │ ├── update.py │ │ │ │ │ └── utils.py │ │ │ │ ├── asset_tracker/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── asset_tracker.py │ │ │ │ ├── connect.py │ │ │ │ ├── firewall.py │ │ │ │ ├── interest_registry/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── change_callback.py │ │ │ │ │ ├── 
exceptions.py │ │ │ │ │ ├── interest_record.py │ │ │ │ │ └── interest_registry.py │ │ │ │ ├── proxy.py │ │ │ │ ├── routes.py │ │ │ │ ├── scheduler/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── entities.py │ │ │ │ │ ├── exceptions.py │ │ │ │ │ └── scheduler.py │ │ │ │ ├── server.py │ │ │ │ ├── service_registry/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── exceptions.py │ │ │ │ │ ├── monitor.py │ │ │ │ │ └── service_registry.py │ │ │ │ ├── snapshot.py │ │ │ │ ├── support.py │ │ │ │ └── user_model.py │ │ │ └── south/ │ │ │ ├── __init__.py │ │ │ └── exceptions.py │ │ └── tasks/ │ │ ├── README.rst │ │ ├── __init__.py │ │ ├── automation_script/ │ │ │ ├── __init__.py │ │ │ └── __main__.py │ │ ├── common/ │ │ │ ├── README.rst │ │ │ └── __init__.py │ │ ├── purge/ │ │ │ ├── README.rst │ │ │ ├── __init__.py │ │ │ ├── __main__.py │ │ │ └── purge.py │ │ └── statistics/ │ │ ├── README.rst │ │ ├── __init__.py │ │ ├── __main__.py │ │ └── statistics_history.py │ ├── requirements-dev.txt │ ├── requirements-test.txt │ ├── requirements.txt │ ├── setup.py │ └── thirdparty/ │ └── README.rst ├── requirements.sh ├── scripts/ │ ├── __template__.sh │ ├── auth_certificates │ ├── certificates │ ├── common/ │ │ ├── README.rst │ │ ├── audittime.py │ │ ├── check_schema_update.sh │ │ ├── disk_usage.py │ │ ├── get_engine_management.sh │ │ ├── get_logs.sh │ │ ├── get_platform.sh │ │ ├── get_readings_plugin.sh │ │ ├── get_storage_plugin.sh │ │ ├── json_parse.py │ │ ├── loglevel.py │ │ ├── service_status.py │ │ ├── try_catch.sh │ │ └── write_log.sh │ ├── debug/ │ │ ├── .debugrc │ │ ├── README.rst │ │ ├── attach │ │ ├── buffer │ │ ├── commands │ │ ├── debug │ │ ├── detach │ │ ├── isolate │ │ ├── replay │ │ ├── resumeIngest │ │ ├── setBuffer │ │ ├── state │ │ ├── step │ │ ├── store │ │ └── suspendIngest │ ├── extras/ │ │ ├── fledge.sudoers │ │ ├── fledge.sudoers_rh │ │ ├── fledge_update │ │ ├── fogbench │ │ ├── update_task.apt │ │ ├── update_task.snappy │ │ └── update_task.yum │ ├── fledge │ ├── fledge_mnt │ ├── 
package/ │ │ └── debian/ │ │ ├── package_update.sh │ │ └── upgrade/ │ │ ├── 1.4.sh │ │ ├── 1.5.sh │ │ └── 1.8.sh │ ├── plugins/ │ │ └── storage/ │ │ ├── postgres/ │ │ │ ├── downgrade/ │ │ │ │ ├── 1.sql │ │ │ │ ├── 10.sql │ │ │ │ ├── 11.sql │ │ │ │ ├── 12.sql │ │ │ │ ├── 13.sql │ │ │ │ ├── 14.sql │ │ │ │ ├── 15.sql │ │ │ │ ├── 16.sql │ │ │ │ ├── 17.sql │ │ │ │ ├── 18.sql │ │ │ │ ├── 19.sql │ │ │ │ ├── 2.sql │ │ │ │ ├── 20.sql │ │ │ │ ├── 21.sql │ │ │ │ ├── 22.sql │ │ │ │ ├── 23.sql │ │ │ │ ├── 24.sql │ │ │ │ ├── 25.sql │ │ │ │ ├── 26.sql │ │ │ │ ├── 27.sql │ │ │ │ ├── 28.sql │ │ │ │ ├── 29.sql │ │ │ │ ├── 3.sql │ │ │ │ ├── 30.sql │ │ │ │ ├── 31.sql │ │ │ │ ├── 32.sql │ │ │ │ ├── 33.sql │ │ │ │ ├── 34.sql │ │ │ │ ├── 35.sql │ │ │ │ ├── 36.sql │ │ │ │ ├── 37.sql │ │ │ │ ├── 38.sql │ │ │ │ ├── 39.sql │ │ │ │ ├── 4.sql │ │ │ │ ├── 40.sql │ │ │ │ ├── 41.sql │ │ │ │ ├── 42.sql │ │ │ │ ├── 43.sql │ │ │ │ ├── 44.sql │ │ │ │ ├── 45.sql │ │ │ │ ├── 46.sql │ │ │ │ ├── 47.sql │ │ │ │ ├── 48.sql │ │ │ │ ├── 49.sql │ │ │ │ ├── 5.sql │ │ │ │ ├── 50.sql │ │ │ │ ├── 51.sql │ │ │ │ ├── 52.sql │ │ │ │ ├── 53.sql │ │ │ │ ├── 54.sql │ │ │ │ ├── 55.sql │ │ │ │ ├── 56.sql │ │ │ │ ├── 57.sql │ │ │ │ ├── 58.sql │ │ │ │ ├── 59.sql │ │ │ │ ├── 6.sql │ │ │ │ ├── 60.sql │ │ │ │ ├── 61.sql │ │ │ │ ├── 62.sql │ │ │ │ ├── 63.sql │ │ │ │ ├── 64.sql │ │ │ │ ├── 65.sql │ │ │ │ ├── 66.sql │ │ │ │ ├── 67.sql │ │ │ │ ├── 68.sql │ │ │ │ ├── 69.sql │ │ │ │ ├── 7.sql │ │ │ │ ├── 70.sql │ │ │ │ ├── 71.sql │ │ │ │ ├── 72.sql │ │ │ │ ├── 73.sql │ │ │ │ ├── 74.sql │ │ │ │ ├── 75.sql │ │ │ │ ├── 8.sql │ │ │ │ ├── 9.sql │ │ │ │ └── README │ │ │ ├── init.sql │ │ │ ├── schema_update.sh │ │ │ └── upgrade/ │ │ │ ├── 10.sql │ │ │ ├── 11.sql │ │ │ ├── 12.sql │ │ │ ├── 13.sql │ │ │ ├── 14.sql │ │ │ ├── 15.sql │ │ │ ├── 16.sql │ │ │ ├── 17.sql │ │ │ ├── 18.sql │ │ │ ├── 19.sql │ │ │ ├── 2.sql │ │ │ ├── 20.sql │ │ │ ├── 21.sql │ │ │ ├── 22.sql │ │ │ ├── 23.sql │ │ │ ├── 24.sql │ │ │ ├── 25.sql │ │ │ ├── 26.sql │ │ │ ├── 
27.sql │ │ │ ├── 28.sql │ │ │ ├── 29.sql │ │ │ ├── 3.sql │ │ │ ├── 30.sql │ │ │ ├── 31.sql │ │ │ ├── 32.sql │ │ │ ├── 33.sql │ │ │ ├── 34.sql │ │ │ ├── 35.sql │ │ │ ├── 36.sql │ │ │ ├── 37.sql │ │ │ ├── 38.sql │ │ │ ├── 39.sql │ │ │ ├── 4.sql │ │ │ ├── 40.sql │ │ │ ├── 41.sql │ │ │ ├── 42.sql │ │ │ ├── 43.sql │ │ │ ├── 44.sql │ │ │ ├── 45.sql │ │ │ ├── 46.sql │ │ │ ├── 47.sql │ │ │ ├── 48.sql │ │ │ ├── 49.sql │ │ │ ├── 5.sql │ │ │ ├── 50.sql │ │ │ ├── 51.sql │ │ │ ├── 52.sql │ │ │ ├── 53.sql │ │ │ ├── 54.sql │ │ │ ├── 55.sql │ │ │ ├── 56.sql │ │ │ ├── 57.sql │ │ │ ├── 58.sql │ │ │ ├── 59.sql │ │ │ ├── 6.sql │ │ │ ├── 60.sql │ │ │ ├── 61.sql │ │ │ ├── 62.sql │ │ │ ├── 63.sql │ │ │ ├── 64.sql │ │ │ ├── 65.sql │ │ │ ├── 66.sql │ │ │ ├── 67.sql │ │ │ ├── 68.sql │ │ │ ├── 69.sql │ │ │ ├── 7.sql │ │ │ ├── 70.sql │ │ │ ├── 71.sql │ │ │ ├── 72.sql │ │ │ ├── 73.sql │ │ │ ├── 74.sql │ │ │ ├── 75.sql │ │ │ ├── 76.sql │ │ │ ├── 8.sql │ │ │ ├── 9.sql │ │ │ └── README │ │ ├── postgres.sh │ │ ├── sqlite/ │ │ │ ├── downgrade/ │ │ │ │ ├── 1.sql │ │ │ │ ├── 10.sql │ │ │ │ ├── 11.sql │ │ │ │ ├── 12.sql │ │ │ │ ├── 13.sql │ │ │ │ ├── 14.sql │ │ │ │ ├── 15.sql │ │ │ │ ├── 16.sql │ │ │ │ ├── 17.sql │ │ │ │ ├── 18.sql │ │ │ │ ├── 19.sql │ │ │ │ ├── 2.sql │ │ │ │ ├── 20.sql │ │ │ │ ├── 21.sql │ │ │ │ ├── 22.sql │ │ │ │ ├── 23.sql │ │ │ │ ├── 24.sql │ │ │ │ ├── 25.sql │ │ │ │ ├── 26.sql │ │ │ │ ├── 27.sql │ │ │ │ ├── 28.sql │ │ │ │ ├── 29.sql │ │ │ │ ├── 3.sql │ │ │ │ ├── 30.sql │ │ │ │ ├── 31.sql │ │ │ │ ├── 32.sql │ │ │ │ ├── 33.sql │ │ │ │ ├── 34.sh │ │ │ │ ├── 34.sql │ │ │ │ ├── 35.sql │ │ │ │ ├── 36.sql │ │ │ │ ├── 37.sh │ │ │ │ ├── 37.sql │ │ │ │ ├── 38.sql │ │ │ │ ├── 39.sql │ │ │ │ ├── 4.sql │ │ │ │ ├── 40.sql │ │ │ │ ├── 41.sql │ │ │ │ ├── 42.sql │ │ │ │ ├── 43.sql │ │ │ │ ├── 44.sql │ │ │ │ ├── 45.sql │ │ │ │ ├── 46.sql │ │ │ │ ├── 47.sql │ │ │ │ ├── 48.sql │ │ │ │ ├── 49.sql │ │ │ │ ├── 5.sql │ │ │ │ ├── 50.sql │ │ │ │ ├── 51.sql │ │ │ │ ├── 52.sql │ │ │ │ ├── 53.sql │ │ │ │ ├── 
54.sql │ │ │ │ ├── 55.sql │ │ │ │ ├── 56.sql │ │ │ │ ├── 57.sql │ │ │ │ ├── 58.sql │ │ │ │ ├── 59.sql │ │ │ │ ├── 6.sql │ │ │ │ ├── 60.sql │ │ │ │ ├── 61.sql │ │ │ │ ├── 62.sql │ │ │ │ ├── 63.sql │ │ │ │ ├── 64.sql │ │ │ │ ├── 65.sql │ │ │ │ ├── 66.sql │ │ │ │ ├── 67.sql │ │ │ │ ├── 68.sql │ │ │ │ ├── 69.sql │ │ │ │ ├── 7.sql │ │ │ │ ├── 70.sql │ │ │ │ ├── 71.sql │ │ │ │ ├── 72.sql │ │ │ │ ├── 73.sql │ │ │ │ ├── 74.sql │ │ │ │ ├── 75.sql │ │ │ │ ├── 8.sql │ │ │ │ ├── 9.sql │ │ │ │ └── README │ │ │ ├── init.sql │ │ │ ├── init_readings.sql │ │ │ ├── schema_update.sh │ │ │ └── upgrade/ │ │ │ ├── 10.sql │ │ │ ├── 11.sql │ │ │ ├── 12.sql │ │ │ ├── 13.sql │ │ │ ├── 14.sql │ │ │ ├── 15.sql │ │ │ ├── 16.sql │ │ │ ├── 17.sql │ │ │ ├── 18.sql │ │ │ ├── 19.sql │ │ │ ├── 2.sql │ │ │ ├── 20.sql │ │ │ ├── 21.sql │ │ │ ├── 22.sql │ │ │ ├── 23.sql │ │ │ ├── 24.sql │ │ │ ├── 25.sql │ │ │ ├── 26.sql │ │ │ ├── 27.sql │ │ │ ├── 28.sql │ │ │ ├── 29.sql │ │ │ ├── 3.sql │ │ │ ├── 30.sql │ │ │ ├── 31.sql │ │ │ ├── 32.sql │ │ │ ├── 33.sql │ │ │ ├── 34.sql │ │ │ ├── 35.sh │ │ │ ├── 35.sql │ │ │ ├── 36.sql │ │ │ ├── 37.sql │ │ │ ├── 38.sh │ │ │ ├── 38.sql │ │ │ ├── 39.sql │ │ │ ├── 4.sql │ │ │ ├── 40.sql │ │ │ ├── 41.sql │ │ │ ├── 42.sql │ │ │ ├── 43.sql │ │ │ ├── 44.sql │ │ │ ├── 45.sql │ │ │ ├── 46.sql │ │ │ ├── 47.sql │ │ │ ├── 48.sql │ │ │ ├── 49.sql │ │ │ ├── 5.sql │ │ │ ├── 50.sql │ │ │ ├── 51.sql │ │ │ ├── 52.sql │ │ │ ├── 53.sql │ │ │ ├── 54.sql │ │ │ ├── 55.sql │ │ │ ├── 56.sql │ │ │ ├── 57.sql │ │ │ ├── 58.sql │ │ │ ├── 59.sql │ │ │ ├── 6.sql │ │ │ ├── 60.sql │ │ │ ├── 61.sql │ │ │ ├── 62.sql │ │ │ ├── 63.sql │ │ │ ├── 64.sql │ │ │ ├── 65.sql │ │ │ ├── 66.sql │ │ │ ├── 67.sql │ │ │ ├── 68.sql │ │ │ ├── 69.sql │ │ │ ├── 7.sql │ │ │ ├── 70.sql │ │ │ ├── 71.sql │ │ │ ├── 72.sql │ │ │ ├── 73.sql │ │ │ ├── 74.sql │ │ │ ├── 75.sql │ │ │ ├── 76.sql │ │ │ ├── 8.sql │ │ │ ├── 9.sql │ │ │ └── README │ │ ├── sqlite.sh │ │ ├── sqlitelb/ │ │ │ ├── downgrade/ │ │ │ │ ├── 42.sql │ │ │ │ ├── 
43.sql │ │ │ │ ├── 44.sql │ │ │ │ ├── 45.sql │ │ │ │ ├── 46.sql │ │ │ │ ├── 47.sql │ │ │ │ ├── 48.sql │ │ │ │ ├── 49.sql │ │ │ │ ├── 50.sql │ │ │ │ ├── 51.sql │ │ │ │ ├── 52.sql │ │ │ │ ├── 53.sql │ │ │ │ ├── 54.sql │ │ │ │ ├── 55.sql │ │ │ │ ├── 56.sql │ │ │ │ ├── 57.sql │ │ │ │ ├── 58.sql │ │ │ │ ├── 59.sql │ │ │ │ ├── 60.sql │ │ │ │ ├── 61.sql │ │ │ │ ├── 62.sql │ │ │ │ ├── 63.sql │ │ │ │ ├── 64.sql │ │ │ │ ├── 65.sql │ │ │ │ ├── 66.sql │ │ │ │ ├── 67.sql │ │ │ │ ├── 68.sql │ │ │ │ ├── 69.sql │ │ │ │ ├── 70.sql │ │ │ │ ├── 71.sql │ │ │ │ ├── 72.sql │ │ │ │ ├── 73.sql │ │ │ │ ├── 74.sql │ │ │ │ ├── 75.sql │ │ │ │ └── README │ │ │ ├── init.sql │ │ │ ├── init_readings.sql │ │ │ ├── schema_update.sh │ │ │ └── upgrade/ │ │ │ ├── 43.sql │ │ │ ├── 44.sql │ │ │ ├── 45.sql │ │ │ ├── 46.sql │ │ │ ├── 47.sql │ │ │ ├── 48.sql │ │ │ ├── 49.sql │ │ │ ├── 50.sql │ │ │ ├── 51.sql │ │ │ ├── 52.sql │ │ │ ├── 53.sql │ │ │ ├── 54.sql │ │ │ ├── 55.sql │ │ │ ├── 56.sql │ │ │ ├── 57.sql │ │ │ ├── 58.sql │ │ │ ├── 59.sql │ │ │ ├── 60.sql │ │ │ ├── 61.sql │ │ │ ├── 62.sql │ │ │ ├── 63.sql │ │ │ ├── 64.sql │ │ │ ├── 65.sql │ │ │ ├── 66.sql │ │ │ ├── 67.sql │ │ │ ├── 68.sql │ │ │ ├── 69.sql │ │ │ ├── 70.sql │ │ │ ├── 71.sql │ │ │ ├── 72.sql │ │ │ ├── 73.sql │ │ │ ├── 74.sql │ │ │ ├── 75.sql │ │ │ ├── 76.sql │ │ │ └── README │ │ └── sqlitelb.sh │ ├── services/ │ │ ├── README.rst │ │ ├── bucket_storage_c │ │ ├── dispatcher_c │ │ ├── north_C │ │ ├── notification_c │ │ ├── pipeline_c │ │ ├── south_c │ │ └── storage │ ├── storage │ └── tasks/ │ ├── README.rst │ ├── automation_script │ ├── backup │ ├── check_certs │ ├── check_updates │ ├── north_c │ ├── purge │ ├── purge_system │ ├── restore │ └── statistics ├── tests/ │ ├── .gitignore │ ├── README.rst │ ├── __init__.py │ ├── system/ │ │ ├── __init__.py │ │ ├── common/ │ │ │ ├── clean_pi_system.py │ │ │ └── scripts/ │ │ │ └── reset_user_authentication │ │ ├── lab/ │ │ │ ├── README.rst │ │ │ ├── check_env │ │ │ ├── install │ │ │ ├── remove │ │ │ 
├── reset │ │ │ ├── run │ │ │ ├── run_until_fails │ │ │ ├── scripts/ │ │ │ │ ├── ema.py │ │ │ │ ├── flash_leds.py │ │ │ │ ├── trendc.py │ │ │ │ └── write_out.py │ │ │ ├── test │ │ │ ├── test.config │ │ │ └── verify_clean_pi.py │ │ ├── memory_leak/ │ │ │ ├── config.sh │ │ │ ├── scripts/ │ │ │ │ ├── log_analyzer │ │ │ │ ├── reset │ │ │ │ └── setup │ │ │ ├── test_memcheck.sh │ │ │ └── valgrind-python.supp │ │ ├── plugins/ │ │ │ ├── README.rst │ │ │ └── south/ │ │ │ └── fledge-south-testcard/ │ │ │ ├── .gitignore │ │ │ ├── CMakeLists.txt │ │ │ ├── Description │ │ │ ├── FindFledge.cmake │ │ │ ├── LICENSE │ │ │ ├── Package │ │ │ ├── README.rst │ │ │ ├── VERSION │ │ │ ├── fledge.version │ │ │ ├── mkversion │ │ │ ├── plugin.cpp │ │ │ └── requirements.sh │ │ └── python/ │ │ ├── README.rst │ │ ├── api/ │ │ │ ├── control_service/ │ │ │ │ ├── test_entrypoint.py │ │ │ │ └── test_pipeline.py │ │ │ ├── test_alerts.py │ │ │ ├── test_audit.py │ │ │ ├── test_authentication.py │ │ │ ├── test_browser_assets.py │ │ │ ├── test_common.py │ │ │ ├── test_configuration.py │ │ │ ├── test_endpoints_with_different_user_types.py │ │ │ ├── test_notification.py │ │ │ ├── test_passwords.py │ │ │ ├── test_plugin_discovery.py │ │ │ ├── test_service.py │ │ │ └── test_statistics.py │ │ ├── conftest.py │ │ ├── data/ │ │ │ ├── dummyplugin.py │ │ │ ├── notify35.py │ │ │ ├── vibration.csv │ │ │ └── wind-data.csv │ │ ├── e2e/ │ │ │ ├── docs/ │ │ │ │ ├── test_e2e_coap_PI.rst │ │ │ │ ├── test_e2e_csv_PI.rst │ │ │ │ ├── test_e2e_csv_multi_filter_pi.rst │ │ │ │ ├── test_e2e_expr_pi.rst │ │ │ │ ├── test_e2e_filter_fft_threshold.rst │ │ │ │ ├── test_e2e_kafka.rst │ │ │ │ ├── test_e2e_modbus_c_pi.rst │ │ │ │ ├── test_e2e_notification_service_with_plugins.rst │ │ │ │ └── test_south_service_tuning.rst │ │ │ ├── test_e2e_coap_OCS.py │ │ │ ├── test_e2e_coap_PI.py │ │ │ ├── test_e2e_csv_PI.py │ │ │ ├── test_e2e_csv_multi_filter_pi.py │ │ │ ├── test_e2e_expr_pi.py │ │ │ ├── test_e2e_filter_fft_threshold.py │ │ │ ├── 
test_e2e_kafka.py │ │ │ ├── test_e2e_modbus_c_pi.py │ │ │ ├── test_e2e_modbus_c_rtu_pi.py │ │ │ ├── test_e2e_notification_service_with_plugins.py │ │ │ ├── test_e2e_pi_scaleset.py │ │ │ ├── test_e2e_vary_asset_http_pi.py │ │ │ └── test_south_service_tuning.py │ │ ├── fledge/ │ │ │ └── plugins/ │ │ │ ├── filter/ │ │ │ │ └── imageblock/ │ │ │ │ ├── README.rst │ │ │ │ ├── __init__.py │ │ │ │ └── imageblock.py │ │ │ └── south/ │ │ │ └── imagetest/ │ │ │ ├── __init__.py │ │ │ ├── imagetest.py │ │ │ └── readme.rst │ │ ├── helpers/ │ │ │ └── utils.py │ │ ├── iprpc/ │ │ │ ├── README.rst │ │ │ └── test_iprpc.py │ │ ├── packages/ │ │ │ ├── README.rst │ │ │ ├── data/ │ │ │ │ ├── package_list.json │ │ │ │ ├── readings35.py │ │ │ │ └── set_id.py │ │ │ ├── docs/ │ │ │ │ ├── test_authentication.rst │ │ │ │ ├── test_filters.rst │ │ │ │ ├── test_multiple_assets.rst │ │ │ │ ├── test_north_azure.rst │ │ │ │ ├── test_north_pi_webapi_nw_throttle.rst │ │ │ │ ├── test_omf_naming_scheme.rst │ │ │ │ ├── test_omf_north_service.rst │ │ │ │ ├── test_pi_webapi.rst │ │ │ │ ├── test_pi_webapi_linked_data_type.rst │ │ │ │ ├── test_rule_data_availability.rst │ │ │ │ └── test_statistics_history_notification_rule.rst │ │ │ ├── network_impairment.py │ │ │ ├── test_authentication.py │ │ │ ├── test_available_and_install_api.py │ │ │ ├── test_eds.py │ │ │ ├── test_filters.py │ │ │ ├── test_lab.py │ │ │ ├── test_multiple_assets.py │ │ │ ├── test_north_azure.py │ │ │ ├── test_north_pi_webapi_nw_throttle.py │ │ │ ├── test_omf_naming_scheme.py │ │ │ ├── test_omf_north_service.py │ │ │ ├── test_opcua.py │ │ │ ├── test_pi_webapi.py │ │ │ ├── test_pi_webapi_linked_data_type.py │ │ │ ├── test_rule_data_availability.py │ │ │ └── test_statistics_history_notification_rule.py │ │ ├── pair/ │ │ │ ├── docs/ │ │ │ │ ├── test_c_north_service_pair.rst │ │ │ │ ├── test_e2e_fledge_pair.rst │ │ │ │ └── test_python_north_service_pair.rst │ │ │ ├── test_c_north_service_pair.py │ │ │ ├── test_e2e_fledge_pair.py │ │ │ └── 
test_python_north_service_pair.py │ │ ├── plugin_and_service.py │ │ ├── plugins/ │ │ │ ├── dummy/ │ │ │ │ └── iprpc/ │ │ │ │ ├── filter/ │ │ │ │ │ ├── numpy_filter/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── numpy_filter.py │ │ │ │ │ └── numpy_iprpc_filter/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── np_server.py │ │ │ │ │ └── numpy_iprpc_filter.py │ │ │ │ └── south/ │ │ │ │ └── numpy_south/ │ │ │ │ ├── __init__.py │ │ │ │ └── numpy_south.py │ │ │ ├── notificationDelivery/ │ │ │ │ └── send/ │ │ │ │ └── send.py │ │ │ └── notificationRule/ │ │ │ └── numpy_image/ │ │ │ └── numpy_image.py │ │ ├── pytest.ini │ │ ├── rpi/ │ │ │ └── test_e2e_rpi_ephat.py │ │ ├── scripts/ │ │ │ ├── install_c_plugin │ │ │ ├── install_c_service │ │ │ ├── install_python_plugin │ │ │ ├── package/ │ │ │ │ ├── remove │ │ │ │ ├── reset │ │ │ │ └── setup │ │ │ └── reset_plugins │ │ └── smoke/ │ │ └── test_smoke.py │ └── unit/ │ ├── C/ │ │ ├── CMakeLists.txt │ │ ├── CodeCoverage.cmake │ │ ├── README.rst │ │ ├── cmake_pg/ │ │ │ ├── CMakeLists.txt │ │ │ └── CheckRhPg.cmake │ │ ├── cmake_sqlite/ │ │ │ ├── CMakeLists.txt │ │ │ └── Findsqlite3.cmake │ │ ├── cmake_sqliteM/ │ │ │ ├── CMakeLists.txt │ │ │ └── Findsqlite3.cmake │ │ ├── cmake_sqlitelb/ │ │ │ ├── CMakeLists.txt │ │ │ └── Findsqlite3.cmake │ │ ├── common/ │ │ │ ├── CMakeLists.txt │ │ │ ├── main.cpp │ │ │ ├── test_JSONPath.cpp │ │ │ ├── test_circular_buffer.cpp │ │ │ ├── test_config_category.cpp │ │ │ ├── test_createDirectory.cpp │ │ │ ├── test_default_config_category.cpp │ │ │ ├── test_expression.cpp │ │ │ ├── test_imageencode.cpp │ │ │ ├── test_insert_value.cpp │ │ │ ├── test_json_reading.cpp │ │ │ ├── test_json_utils.cpp │ │ │ ├── test_log_interceptor.cpp │ │ │ ├── test_purge_result.cpp │ │ │ ├── test_python_reading.cpp │ │ │ ├── test_python_reading_set.cpp │ │ │ ├── test_python_readingnumpy.cpp │ │ │ ├── test_query.cpp │ │ │ ├── test_reading.cpp │ │ │ ├── test_reading_array.cpp │ │ │ ├── test_reading_set.cpp │ │ │ ├── test_readingset_merge.cpp 
│ │ │ ├── test_resultset.cpp │ │ │ ├── test_service_record.cpp │ │ │ └── test_string_utils.cpp │ │ ├── plugins/ │ │ │ ├── common/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── main.cpp │ │ │ │ ├── test_omf_translation.cpp │ │ │ │ └── test_omf_translation_piwebapi.cpp │ │ │ └── storage/ │ │ │ ├── common/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── README.rst │ │ │ │ └── tests.cpp │ │ │ ├── postgres/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── CheckRhPg.cmake │ │ │ │ ├── README.rst │ │ │ │ └── tests.cpp │ │ │ ├── sqlite/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── README.rst │ │ │ │ └── tests.cpp │ │ │ ├── sqlitelb/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── README.rst │ │ │ │ └── tests.cpp │ │ │ └── sqlitememory/ │ │ │ ├── CMakeLists.txt │ │ │ ├── README.rst │ │ │ └── sqlmem_tests.cpp │ │ ├── requirements.sh │ │ ├── scripts/ │ │ │ └── RunAllTests.sh │ │ └── services/ │ │ ├── core/ │ │ │ ├── CMakeLists.txt │ │ │ ├── main.cpp │ │ │ ├── reading_set_copy.cpp │ │ │ └── test_service_regsitery.cpp │ │ └── storage/ │ │ ├── postgres/ │ │ │ ├── CMakeLists.txt │ │ │ ├── DeleteRows.json │ │ │ ├── GetTable.json │ │ │ ├── PostStorageSchema.json │ │ │ ├── PostTable.json │ │ │ ├── PutQuery.json │ │ │ ├── PutTable.json │ │ │ ├── PutTableExpression.json │ │ │ ├── README.rst │ │ │ ├── expected_ETC_UTC/ │ │ │ │ ├── 1 │ │ │ │ ├── 10 │ │ │ │ ├── 100 │ │ │ │ ├── 101 │ │ │ │ ├── 102 │ │ │ │ ├── 103 │ │ │ │ ├── 104 │ │ │ │ ├── 105 │ │ │ │ ├── 106 │ │ │ │ ├── 107 │ │ │ │ ├── 108 │ │ │ │ ├── 109 │ │ │ │ ├── 11 │ │ │ │ ├── 110 │ │ │ │ ├── 111 │ │ │ │ ├── 112 │ │ │ │ ├── 113 │ │ │ │ ├── 115 │ │ │ │ ├── 116 │ │ │ │ ├── 12 │ │ │ │ ├── 13 │ │ │ │ ├── 14 │ │ │ │ ├── 15 │ │ │ │ ├── 16 │ │ │ │ ├── 17 │ │ │ │ ├── 18 │ │ │ │ ├── 19 │ │ │ │ ├── 2 │ │ │ │ ├── 20 │ │ │ │ ├── 21 │ │ │ │ ├── 22 │ │ │ │ ├── 23 │ │ │ │ ├── 24 │ │ │ │ ├── 25 │ │ │ │ ├── 26 │ │ │ │ ├── 27 │ │ │ │ ├── 28 │ │ │ │ ├── 29 │ │ │ │ ├── 3 │ │ │ │ ├── 30 │ │ │ │ ├── 31 │ │ │ │ ├── 32 │ │ │ │ ├── 33 │ │ │ │ ├── 34 │ │ │ │ ├── 35 │ │ │ │ ├── 37 │ │ │ │ ├── 38 │ │ 
│ │ ├── 39 │ │ │ │ ├── 4 │ │ │ │ ├── 40 │ │ │ │ ├── 41 │ │ │ │ ├── 42 │ │ │ │ ├── 43 │ │ │ │ ├── 44 │ │ │ │ ├── 45 │ │ │ │ ├── 46 │ │ │ │ ├── 47 │ │ │ │ ├── 48 │ │ │ │ ├── 49 │ │ │ │ ├── 5 │ │ │ │ ├── 50 │ │ │ │ ├── 51 │ │ │ │ ├── 52 │ │ │ │ ├── 53 │ │ │ │ ├── 54 │ │ │ │ ├── 55 │ │ │ │ ├── 56 │ │ │ │ ├── 57 │ │ │ │ ├── 58 │ │ │ │ ├── 59 │ │ │ │ ├── 6 │ │ │ │ ├── 60 │ │ │ │ ├── 61 │ │ │ │ ├── 62 │ │ │ │ ├── 63 │ │ │ │ ├── 64 │ │ │ │ ├── 65 │ │ │ │ ├── 66 │ │ │ │ ├── 67 │ │ │ │ ├── 68 │ │ │ │ ├── 69 │ │ │ │ ├── 7 │ │ │ │ ├── 70 │ │ │ │ ├── 71 │ │ │ │ ├── 72 │ │ │ │ ├── 73 │ │ │ │ ├── 74 │ │ │ │ ├── 75 │ │ │ │ ├── 76 │ │ │ │ ├── 77 │ │ │ │ ├── 78 │ │ │ │ ├── 79 │ │ │ │ ├── 8 │ │ │ │ ├── 80 │ │ │ │ ├── 81 │ │ │ │ ├── 82 │ │ │ │ ├── 83 │ │ │ │ ├── 84 │ │ │ │ ├── 85 │ │ │ │ ├── 86 │ │ │ │ ├── 87 │ │ │ │ ├── 88 │ │ │ │ ├── 89 │ │ │ │ ├── 9 │ │ │ │ ├── 90 │ │ │ │ ├── 91 │ │ │ │ ├── 92 │ │ │ │ ├── 93 │ │ │ │ ├── 94 │ │ │ │ ├── 95 │ │ │ │ ├── 96 │ │ │ │ ├── 97 │ │ │ │ ├── 98 │ │ │ │ └── 99 │ │ │ ├── expected_ETC_UTC_PG12/ │ │ │ │ ├── 1 │ │ │ │ ├── 10 │ │ │ │ ├── 100 │ │ │ │ ├── 101 │ │ │ │ ├── 102 │ │ │ │ ├── 103 │ │ │ │ ├── 104 │ │ │ │ ├── 105 │ │ │ │ ├── 106 │ │ │ │ ├── 107 │ │ │ │ ├── 108 │ │ │ │ ├── 109 │ │ │ │ ├── 11 │ │ │ │ ├── 110 │ │ │ │ ├── 111 │ │ │ │ ├── 112 │ │ │ │ ├── 113 │ │ │ │ ├── 115 │ │ │ │ ├── 116 │ │ │ │ ├── 12 │ │ │ │ ├── 13 │ │ │ │ ├── 14 │ │ │ │ ├── 15 │ │ │ │ ├── 16 │ │ │ │ ├── 17 │ │ │ │ ├── 18 │ │ │ │ ├── 19 │ │ │ │ ├── 2 │ │ │ │ ├── 20 │ │ │ │ ├── 21 │ │ │ │ ├── 22 │ │ │ │ ├── 23 │ │ │ │ ├── 24 │ │ │ │ ├── 25 │ │ │ │ ├── 26 │ │ │ │ ├── 27 │ │ │ │ ├── 28 │ │ │ │ ├── 29 │ │ │ │ ├── 3 │ │ │ │ ├── 30 │ │ │ │ ├── 31 │ │ │ │ ├── 32 │ │ │ │ ├── 33 │ │ │ │ ├── 34 │ │ │ │ ├── 35 │ │ │ │ ├── 37 │ │ │ │ ├── 38 │ │ │ │ ├── 39 │ │ │ │ ├── 4 │ │ │ │ ├── 40 │ │ │ │ ├── 41 │ │ │ │ ├── 42 │ │ │ │ ├── 43 │ │ │ │ ├── 44 │ │ │ │ ├── 45 │ │ │ │ ├── 46 │ │ │ │ ├── 47 │ │ │ │ ├── 48 │ │ │ │ ├── 49 │ │ │ │ ├── 5 │ │ │ │ ├── 50 │ │ │ │ ├── 51 │ │ │ │ ├── 52 │ │ │ │ ├── 53 
│ │ │ │ ├── 54 │ │ │ │ ├── 55 │ │ │ │ ├── 56 │ │ │ │ ├── 57 │ │ │ │ ├── 58 │ │ │ │ ├── 59 │ │ │ │ ├── 6 │ │ │ │ ├── 60 │ │ │ │ ├── 61 │ │ │ │ ├── 62 │ │ │ │ ├── 63 │ │ │ │ ├── 64 │ │ │ │ ├── 65 │ │ │ │ ├── 66 │ │ │ │ ├── 67 │ │ │ │ ├── 68 │ │ │ │ ├── 69 │ │ │ │ ├── 7 │ │ │ │ ├── 70 │ │ │ │ ├── 71 │ │ │ │ ├── 72 │ │ │ │ ├── 73 │ │ │ │ ├── 74 │ │ │ │ ├── 75 │ │ │ │ ├── 76 │ │ │ │ ├── 77 │ │ │ │ ├── 78 │ │ │ │ ├── 79 │ │ │ │ ├── 8 │ │ │ │ ├── 80 │ │ │ │ ├── 81 │ │ │ │ ├── 82 │ │ │ │ ├── 83 │ │ │ │ ├── 84 │ │ │ │ ├── 85 │ │ │ │ ├── 86 │ │ │ │ ├── 87 │ │ │ │ ├── 88 │ │ │ │ ├── 89 │ │ │ │ ├── 9 │ │ │ │ ├── 90 │ │ │ │ ├── 91 │ │ │ │ ├── 92 │ │ │ │ ├── 93 │ │ │ │ ├── 94 │ │ │ │ ├── 95 │ │ │ │ ├── 96 │ │ │ │ ├── 97 │ │ │ │ ├── 98 │ │ │ │ └── 99 │ │ │ ├── expected_EUROPE_ROME/ │ │ │ │ ├── 1 │ │ │ │ ├── 10 │ │ │ │ ├── 100 │ │ │ │ ├── 101 │ │ │ │ ├── 102 │ │ │ │ ├── 103 │ │ │ │ ├── 104 │ │ │ │ ├── 105 │ │ │ │ ├── 106 │ │ │ │ ├── 107 │ │ │ │ ├── 108 │ │ │ │ ├── 109 │ │ │ │ ├── 11 │ │ │ │ ├── 110 │ │ │ │ ├── 111 │ │ │ │ ├── 112 │ │ │ │ ├── 113 │ │ │ │ ├── 115 │ │ │ │ ├── 116 │ │ │ │ ├── 12 │ │ │ │ ├── 13 │ │ │ │ ├── 14 │ │ │ │ ├── 15 │ │ │ │ ├── 16 │ │ │ │ ├── 17 │ │ │ │ ├── 18 │ │ │ │ ├── 19 │ │ │ │ ├── 2 │ │ │ │ ├── 20 │ │ │ │ ├── 21 │ │ │ │ ├── 22 │ │ │ │ ├── 23 │ │ │ │ ├── 24 │ │ │ │ ├── 25 │ │ │ │ ├── 26 │ │ │ │ ├── 27 │ │ │ │ ├── 28 │ │ │ │ ├── 29 │ │ │ │ ├── 3 │ │ │ │ ├── 30 │ │ │ │ ├── 31 │ │ │ │ ├── 32 │ │ │ │ ├── 33 │ │ │ │ ├── 34 │ │ │ │ ├── 35 │ │ │ │ ├── 37 │ │ │ │ ├── 38 │ │ │ │ ├── 39 │ │ │ │ ├── 4 │ │ │ │ ├── 40 │ │ │ │ ├── 41 │ │ │ │ ├── 42 │ │ │ │ ├── 43 │ │ │ │ ├── 44 │ │ │ │ ├── 45 │ │ │ │ ├── 46 │ │ │ │ ├── 47 │ │ │ │ ├── 48 │ │ │ │ ├── 49 │ │ │ │ ├── 5 │ │ │ │ ├── 50 │ │ │ │ ├── 51 │ │ │ │ ├── 52 │ │ │ │ ├── 53 │ │ │ │ ├── 54 │ │ │ │ ├── 55 │ │ │ │ ├── 56 │ │ │ │ ├── 57 │ │ │ │ ├── 58 │ │ │ │ ├── 59 │ │ │ │ ├── 6 │ │ │ │ ├── 60 │ │ │ │ ├── 61 │ │ │ │ ├── 62 │ │ │ │ ├── 63 │ │ │ │ ├── 64 │ │ │ │ ├── 65 │ │ │ │ ├── 66 │ │ │ │ ├── 67 │ │ │ │ ├── 68 │ │ │ │ ├── 
69 │ │ │ │ ├── 7 │ │ │ │ ├── 70 │ │ │ │ ├── 71 │ │ │ │ ├── 72 │ │ │ │ ├── 73 │ │ │ │ ├── 74 │ │ │ │ ├── 75 │ │ │ │ ├── 76 │ │ │ │ ├── 77 │ │ │ │ ├── 78 │ │ │ │ ├── 79 │ │ │ │ ├── 8 │ │ │ │ ├── 80 │ │ │ │ ├── 81 │ │ │ │ ├── 82 │ │ │ │ ├── 83 │ │ │ │ ├── 84 │ │ │ │ ├── 85 │ │ │ │ ├── 86 │ │ │ │ ├── 87 │ │ │ │ ├── 88 │ │ │ │ ├── 89 │ │ │ │ ├── 9 │ │ │ │ ├── 90 │ │ │ │ ├── 91 │ │ │ │ ├── 92 │ │ │ │ ├── 93 │ │ │ │ ├── 94 │ │ │ │ ├── 95 │ │ │ │ ├── 96 │ │ │ │ ├── 97 │ │ │ │ ├── 98 │ │ │ │ └── 99 │ │ │ ├── expected_EUROPE_ROME_PG12/ │ │ │ │ ├── 1 │ │ │ │ ├── 10 │ │ │ │ ├── 100 │ │ │ │ ├── 101 │ │ │ │ ├── 102 │ │ │ │ ├── 103 │ │ │ │ ├── 104 │ │ │ │ ├── 105 │ │ │ │ ├── 106 │ │ │ │ ├── 107 │ │ │ │ ├── 108 │ │ │ │ ├── 109 │ │ │ │ ├── 11 │ │ │ │ ├── 110 │ │ │ │ ├── 111 │ │ │ │ ├── 112 │ │ │ │ ├── 113 │ │ │ │ ├── 115 │ │ │ │ ├── 116 │ │ │ │ ├── 12 │ │ │ │ ├── 13 │ │ │ │ ├── 14 │ │ │ │ ├── 15 │ │ │ │ ├── 16 │ │ │ │ ├── 17 │ │ │ │ ├── 18 │ │ │ │ ├── 19 │ │ │ │ ├── 2 │ │ │ │ ├── 20 │ │ │ │ ├── 21 │ │ │ │ ├── 22 │ │ │ │ ├── 23 │ │ │ │ ├── 24 │ │ │ │ ├── 25 │ │ │ │ ├── 26 │ │ │ │ ├── 27 │ │ │ │ ├── 28 │ │ │ │ ├── 29 │ │ │ │ ├── 3 │ │ │ │ ├── 30 │ │ │ │ ├── 31 │ │ │ │ ├── 32 │ │ │ │ ├── 33 │ │ │ │ ├── 34 │ │ │ │ ├── 35 │ │ │ │ ├── 37 │ │ │ │ ├── 38 │ │ │ │ ├── 39 │ │ │ │ ├── 4 │ │ │ │ ├── 40 │ │ │ │ ├── 41 │ │ │ │ ├── 42 │ │ │ │ ├── 43 │ │ │ │ ├── 44 │ │ │ │ ├── 45 │ │ │ │ ├── 46 │ │ │ │ ├── 47 │ │ │ │ ├── 48 │ │ │ │ ├── 49 │ │ │ │ ├── 5 │ │ │ │ ├── 50 │ │ │ │ ├── 51 │ │ │ │ ├── 52 │ │ │ │ ├── 53 │ │ │ │ ├── 54 │ │ │ │ ├── 55 │ │ │ │ ├── 56 │ │ │ │ ├── 57 │ │ │ │ ├── 58 │ │ │ │ ├── 59 │ │ │ │ ├── 6 │ │ │ │ ├── 60 │ │ │ │ ├── 61 │ │ │ │ ├── 62 │ │ │ │ ├── 63 │ │ │ │ ├── 64 │ │ │ │ ├── 65 │ │ │ │ ├── 66 │ │ │ │ ├── 67 │ │ │ │ ├── 68 │ │ │ │ ├── 69 │ │ │ │ ├── 7 │ │ │ │ ├── 70 │ │ │ │ ├── 71 │ │ │ │ ├── 72 │ │ │ │ ├── 73 │ │ │ │ ├── 74 │ │ │ │ ├── 75 │ │ │ │ ├── 76 │ │ │ │ ├── 77 │ │ │ │ ├── 78 │ │ │ │ ├── 79 │ │ │ │ ├── 8 │ │ │ │ ├── 80 │ │ │ │ ├── 81 │ │ │ │ ├── 82 │ │ │ │ ├── 83 │ │ 
│ │ ├── 84 │ │ │ │ ├── 85 │ │ │ │ ├── 86 │ │ │ │ ├── 87 │ │ │ │ ├── 88 │ │ │ │ ├── 89 │ │ │ │ ├── 9 │ │ │ │ ├── 90 │ │ │ │ ├── 91 │ │ │ │ ├── 92 │ │ │ │ ├── 93 │ │ │ │ ├── 94 │ │ │ │ ├── 95 │ │ │ │ ├── 96 │ │ │ │ ├── 97 │ │ │ │ ├── 98 │ │ │ │ └── 99 │ │ │ ├── makeReadings.sh │ │ │ ├── payload1.json │ │ │ ├── payload2.json │ │ │ ├── payload3.json │ │ │ ├── payload4.json │ │ │ ├── payload5.json │ │ │ ├── payload6.json │ │ │ ├── payload7.json │ │ │ ├── payload8.json │ │ │ ├── payload9.json │ │ │ ├── payloads/ │ │ │ │ ├── FOGL-983.json │ │ │ │ ├── add_snapshot.json │ │ │ │ ├── addnew.json │ │ │ │ ├── asset.json │ │ │ │ ├── bad_sort_1.json │ │ │ │ ├── bad_sort_2.json │ │ │ │ ├── bad_update.json │ │ │ │ ├── badreadings.json │ │ │ │ ├── count_assets.json │ │ │ │ ├── delete.json │ │ │ │ ├── fogl690-error.json │ │ │ │ ├── fogl690-ok.json │ │ │ │ ├── get-FOGL-983.json │ │ │ │ ├── get_updated_complex_JSON.json │ │ │ │ ├── group.json │ │ │ │ ├── group_time.json │ │ │ │ ├── insert.json │ │ │ │ ├── insert2.json │ │ │ │ ├── insert_bad.json │ │ │ │ ├── insert_bad2.json │ │ │ │ ├── limit.json │ │ │ │ ├── limit_max_int.json │ │ │ │ ├── msec_add_readings_user_ts.json │ │ │ │ ├── msec_query_asset_aggmin.json │ │ │ │ ├── msec_query_asset_aggminarray.json │ │ │ │ ├── msec_query_asset_alias.json │ │ │ │ ├── msec_query_asset_noalias.json │ │ │ │ ├── msec_query_readings.json │ │ │ │ ├── multi_and.json │ │ │ │ ├── multi_mixed.json │ │ │ │ ├── multi_or.json │ │ │ │ ├── newer.json │ │ │ │ ├── newerBad.json │ │ │ │ ├── older.json │ │ │ │ ├── put_function_in_JSON.json │ │ │ │ ├── put_json_in_JSON.json │ │ │ │ ├── query_readings.json │ │ │ │ ├── query_readings_in.json │ │ │ │ ├── query_readings_in_bad_values.json │ │ │ │ ├── query_readings_not_in.json │ │ │ │ ├── query_readings_timebucket.json │ │ │ │ ├── query_readings_timebucket1.json │ │ │ │ ├── query_readings_timebucket_bad.json │ │ │ │ ├── query_timebucket_datapoints.json │ │ │ │ ├── read_id_1xx.json │ │ │ │ ├── reading_property.json │ │ │ 
│ ├── reading_property_array.json │ │ │ │ ├── reading_property_bad.json │ │ │ │ ├── readings.json │ │ │ │ ├── readings_timebucket.json │ │ │ │ ├── skip.json │ │ │ │ ├── skip_max_int.json │ │ │ │ ├── sort.json │ │ │ │ ├── sort2.json │ │ │ │ ├── timezone.json │ │ │ │ ├── timezone_bad.json │ │ │ │ ├── update.json │ │ │ │ ├── updateKey.json │ │ │ │ ├── update_bad.json │ │ │ │ ├── update_expression.json │ │ │ │ ├── update_json.json │ │ │ │ ├── update_json2.json │ │ │ │ ├── where_avg.json │ │ │ │ ├── where_bad_1.json │ │ │ │ ├── where_bad_2.json │ │ │ │ ├── where_bad_3.json │ │ │ │ ├── where_bad_4.json │ │ │ │ ├── where_bad_format1.json │ │ │ │ ├── where_bad_format2.json │ │ │ │ ├── where_count.json │ │ │ │ ├── where_count_star.json │ │ │ │ ├── where_distinct.json │ │ │ │ ├── where_id_1.json │ │ │ │ ├── where_id_1_r1.json │ │ │ │ ├── where_id_1_r2.json │ │ │ │ ├── where_id_1_r3.json │ │ │ │ ├── where_id_2.json │ │ │ │ ├── where_id_not_1.json │ │ │ │ ├── where_in.json │ │ │ │ ├── where_in_bad_values.json │ │ │ │ ├── where_like.json │ │ │ │ ├── where_max.json │ │ │ │ ├── where_min.json │ │ │ │ ├── where_multi_aggregatee.json │ │ │ │ ├── where_not_in.json │ │ │ │ ├── where_numeric_column.json │ │ │ │ ├── where_sum.json │ │ │ │ ├── where_test2_d1.json │ │ │ │ ├── where_test2_d2.json │ │ │ │ ├── where_test2_d3.json │ │ │ │ ├── where_test2_d4.json │ │ │ │ └── where_test2_d5.json │ │ │ ├── plugins/ │ │ │ │ └── common/ │ │ │ │ ├── CMakeLists.txt │ │ │ │ ├── README.rst │ │ │ │ └── tests.cpp │ │ │ ├── storageSchemaTest.sh │ │ │ ├── test1.sh │ │ │ ├── test2.sh │ │ │ ├── testCleanup.sh │ │ │ ├── testRunner.sh │ │ │ ├── testSetup.sh │ │ │ ├── tests.cpp │ │ │ └── testset │ │ └── sqlite/ │ │ ├── README.rst │ │ ├── expected_ETC_UTC/ │ │ │ ├── 1 │ │ │ ├── 10 │ │ │ ├── 100 │ │ │ ├── 101 │ │ │ ├── 102 │ │ │ ├── 103 │ │ │ ├── 104 │ │ │ ├── 105 │ │ │ ├── 106 │ │ │ ├── 107 │ │ │ ├── 108 │ │ │ ├── 109 │ │ │ ├── 11 │ │ │ ├── 110 │ │ │ ├── 111 │ │ │ ├── 112 │ │ │ ├── 113 │ │ │ ├── 114 │ │ │ ├── 
115 │ │ │ ├── 116 │ │ │ ├── 117 │ │ │ ├── 118 │ │ │ ├── 119 │ │ │ ├── 12 │ │ │ ├── 120 │ │ │ ├── 121 │ │ │ ├── 122 │ │ │ ├── 123 │ │ │ ├── 124 │ │ │ ├── 125 │ │ │ ├── 126 │ │ │ ├── 127 │ │ │ ├── 128 │ │ │ ├── 13 │ │ │ ├── 14 │ │ │ ├── 15 │ │ │ ├── 16 │ │ │ ├── 17 │ │ │ ├── 18 │ │ │ ├── 19 │ │ │ ├── 2 │ │ │ ├── 20 │ │ │ ├── 21 │ │ │ ├── 22 │ │ │ ├── 23 │ │ │ ├── 24 │ │ │ ├── 25 │ │ │ ├── 26 │ │ │ ├── 27 │ │ │ ├── 28 │ │ │ ├── 29 │ │ │ ├── 3 │ │ │ ├── 30 │ │ │ ├── 31 │ │ │ ├── 32 │ │ │ ├── 33 │ │ │ ├── 34 │ │ │ ├── 35 │ │ │ ├── 37 │ │ │ ├── 38 │ │ │ ├── 39 │ │ │ ├── 4 │ │ │ ├── 40 │ │ │ ├── 41 │ │ │ ├── 42 │ │ │ ├── 43 │ │ │ ├── 44 │ │ │ ├── 45 │ │ │ ├── 46 │ │ │ ├── 47 │ │ │ ├── 48 │ │ │ ├── 49 │ │ │ ├── 5 │ │ │ ├── 50 │ │ │ ├── 51 │ │ │ ├── 52 │ │ │ ├── 53 │ │ │ ├── 54 │ │ │ ├── 55 │ │ │ ├── 56 │ │ │ ├── 57 │ │ │ ├── 58 │ │ │ ├── 59 │ │ │ ├── 6 │ │ │ ├── 60 │ │ │ ├── 61 │ │ │ ├── 62 │ │ │ ├── 63 │ │ │ ├── 64 │ │ │ ├── 65 │ │ │ ├── 66 │ │ │ ├── 67 │ │ │ ├── 68 │ │ │ ├── 69 │ │ │ ├── 7 │ │ │ ├── 70 │ │ │ ├── 71 │ │ │ ├── 72 │ │ │ ├── 73 │ │ │ ├── 74 │ │ │ ├── 75 │ │ │ ├── 76 │ │ │ ├── 77 │ │ │ ├── 78 │ │ │ ├── 79 │ │ │ ├── 8 │ │ │ ├── 80 │ │ │ ├── 81 │ │ │ ├── 82 │ │ │ ├── 83 │ │ │ ├── 84 │ │ │ ├── 85 │ │ │ ├── 86 │ │ │ ├── 87 │ │ │ ├── 88 │ │ │ ├── 89 │ │ │ ├── 9 │ │ │ ├── 90 │ │ │ ├── 91 │ │ │ ├── 92 │ │ │ ├── 93 │ │ │ ├── 94 │ │ │ ├── 95 │ │ │ ├── 96 │ │ │ ├── 97 │ │ │ ├── 98 │ │ │ └── 99 │ │ ├── expected_EUROPE_ROME/ │ │ │ ├── 1 │ │ │ ├── 10 │ │ │ ├── 100 │ │ │ ├── 101 │ │ │ ├── 102 │ │ │ ├── 103 │ │ │ ├── 104 │ │ │ ├── 105 │ │ │ ├── 106 │ │ │ ├── 107 │ │ │ ├── 108 │ │ │ ├── 109 │ │ │ ├── 11 │ │ │ ├── 110 │ │ │ ├── 111 │ │ │ ├── 112 │ │ │ ├── 113 │ │ │ ├── 114 │ │ │ ├── 115 │ │ │ ├── 116 │ │ │ ├── 117 │ │ │ ├── 118 │ │ │ ├── 119 │ │ │ ├── 12 │ │ │ ├── 120 │ │ │ ├── 121 │ │ │ ├── 122 │ │ │ ├── 123 │ │ │ ├── 124 │ │ │ ├── 125 │ │ │ ├── 126 │ │ │ ├── 127 │ │ │ ├── 128 │ │ │ ├── 13 │ │ │ ├── 14 │ │ │ ├── 15 │ │ │ ├── 16 │ │ │ ├── 17 │ │ │ ├── 18 │ │ │ ├── 19 │ │ │ 
├── 2 │ │ │ ├── 20 │ │ │ ├── 21 │ │ │ ├── 22 │ │ │ ├── 23 │ │ │ ├── 24 │ │ │ ├── 25 │ │ │ ├── 26 │ │ │ ├── 27 │ │ │ ├── 28 │ │ │ ├── 29 │ │ │ ├── 3 │ │ │ ├── 30 │ │ │ ├── 31 │ │ │ ├── 32 │ │ │ ├── 33 │ │ │ ├── 34 │ │ │ ├── 35 │ │ │ ├── 37 │ │ │ ├── 38 │ │ │ ├── 39 │ │ │ ├── 4 │ │ │ ├── 40 │ │ │ ├── 41 │ │ │ ├── 42 │ │ │ ├── 43 │ │ │ ├── 44 │ │ │ ├── 45 │ │ │ ├── 46 │ │ │ ├── 47 │ │ │ ├── 48 │ │ │ ├── 49 │ │ │ ├── 5 │ │ │ ├── 50 │ │ │ ├── 51 │ │ │ ├── 52 │ │ │ ├── 53 │ │ │ ├── 54 │ │ │ ├── 55 │ │ │ ├── 56 │ │ │ ├── 57 │ │ │ ├── 58 │ │ │ ├── 59 │ │ │ ├── 6 │ │ │ ├── 60 │ │ │ ├── 61 │ │ │ ├── 62 │ │ │ ├── 63 │ │ │ ├── 64 │ │ │ ├── 65 │ │ │ ├── 66 │ │ │ ├── 67 │ │ │ ├── 68 │ │ │ ├── 69 │ │ │ ├── 7 │ │ │ ├── 70 │ │ │ ├── 71 │ │ │ ├── 72 │ │ │ ├── 73 │ │ │ ├── 74 │ │ │ ├── 75 │ │ │ ├── 76 │ │ │ ├── 77 │ │ │ ├── 78 │ │ │ ├── 79 │ │ │ ├── 8 │ │ │ ├── 80 │ │ │ ├── 81 │ │ │ ├── 82 │ │ │ ├── 83 │ │ │ ├── 84 │ │ │ ├── 85 │ │ │ ├── 86 │ │ │ ├── 87 │ │ │ ├── 88 │ │ │ ├── 89 │ │ │ ├── 9 │ │ │ ├── 90 │ │ │ ├── 91 │ │ │ ├── 92 │ │ │ ├── 93 │ │ │ ├── 94 │ │ │ ├── 95 │ │ │ ├── 96 │ │ │ ├── 97 │ │ │ ├── 98 │ │ │ └── 99 │ │ ├── makeReadings.sh │ │ ├── payloads/ │ │ │ ├── FOGL-983.json │ │ │ ├── add_readings_now.json │ │ │ ├── add_snapshot.json │ │ │ ├── addnew.json │ │ │ ├── asset.json │ │ │ ├── bad_sort_1.json │ │ │ ├── bad_sort_2.json │ │ │ ├── bad_update.json │ │ │ ├── badreadings.json │ │ │ ├── count_assets.json │ │ │ ├── delete.json │ │ │ ├── fogl690-error.json │ │ │ ├── fogl690-ok.json │ │ │ ├── get-FOGL-983.json │ │ │ ├── group.json │ │ │ ├── group_time.json │ │ │ ├── insert.json │ │ │ ├── insert2.json │ │ │ ├── insert_1row.json │ │ │ ├── insert_bad.json │ │ │ ├── insert_bad2.json │ │ │ ├── insert_multi_rows.json │ │ │ ├── limit.json │ │ │ ├── limit_max_int.json │ │ │ ├── msec_add_readings_user_ts.json │ │ │ ├── msec_query_asset_aggmin.json │ │ │ ├── msec_query_asset_aggminarray.json │ │ │ ├── msec_query_asset_alias.json │ │ │ ├── msec_query_asset_noalias.json │ │ │ ├── 
msec_query_readings.json │ │ │ ├── multi_and.json │ │ │ ├── multi_mixed.json │ │ │ ├── multi_or.json │ │ │ ├── newer.json │ │ │ ├── newerBad.json │ │ │ ├── older.json │ │ │ ├── query_readings.json │ │ │ ├── query_readings_in.json │ │ │ ├── query_readings_in_bad_values.json │ │ │ ├── query_readings_not_in.json │ │ │ ├── query_readings_timebucket.json │ │ │ ├── query_readings_timebucket1.json │ │ │ ├── query_readings_timebucket_bad.json │ │ │ ├── query_timebucket_datapoints.json │ │ │ ├── read_id_1xx.json │ │ │ ├── reading_property.json │ │ │ ├── reading_property_array.json │ │ │ ├── reading_property_bad.json │ │ │ ├── readings.json │ │ │ ├── readings_timebucket.json │ │ │ ├── series_group_by_hours.json │ │ │ ├── series_group_by_minutes.json │ │ │ ├── series_seconds.json │ │ │ ├── series_summary_seconds.json │ │ │ ├── skip.json │ │ │ ├── skip_max_int.json │ │ │ ├── sort.json │ │ │ ├── sort2.json │ │ │ ├── timezone.json │ │ │ ├── timezone_bad.json │ │ │ ├── tz_all_insert.json │ │ │ ├── tz_all_read_2.json │ │ │ ├── tz_all_read_3.json │ │ │ ├── tz_readings_insert.json │ │ │ ├── tz_readings_read_2.json │ │ │ ├── tz_readings_read_3.json │ │ │ ├── tz_readings_read_4.json │ │ │ ├── update.json │ │ │ ├── updateKey.json │ │ │ ├── update_bad.json │ │ │ ├── update_expression.json │ │ │ ├── update_json.json │ │ │ ├── update_json2.json │ │ │ ├── update_multi_rows.json │ │ │ ├── updatenow.json │ │ │ ├── where_avg.json │ │ │ ├── where_bad_1.json │ │ │ ├── where_bad_2.json │ │ │ ├── where_bad_3.json │ │ │ ├── where_bad_4.json │ │ │ ├── where_bad_format1.json │ │ │ ├── where_bad_format2.json │ │ │ ├── where_count.json │ │ │ ├── where_count_star.json │ │ │ ├── where_distinct.json │ │ │ ├── where_id_1.json │ │ │ ├── where_id_1_r1.json │ │ │ ├── where_id_1_r2.json │ │ │ ├── where_id_1_r3.json │ │ │ ├── where_id_2.json │ │ │ ├── where_id_not_1.json │ │ │ ├── where_in.json │ │ │ ├── where_in_bad_values.json │ │ │ ├── where_like.json │ │ │ ├── where_max.json │ │ │ ├── where_min.json │ │ │ 
├── where_multi_aggregatee.json │ │ │ ├── where_not_in.json │ │ │ ├── where_sum.json │ │ │ ├── where_test2_d1.json │ │ │ ├── where_test2_d2.json │ │ │ ├── where_test2_d3.json │ │ │ ├── where_test2_d4.json │ │ │ └── where_test2_d5.json │ │ ├── testCleanup.sh │ │ ├── testRunner.sh │ │ ├── testSetup.sh │ │ └── testset │ └── python/ │ ├── .coveragerc │ ├── .pytest.ini │ ├── README.rst │ ├── __template__.py │ └── fledge/ │ ├── common/ │ │ ├── configuration_manager_callback.py │ │ ├── configuration_manager_callback_nonasync.py │ │ ├── configuration_manager_callback_norun.py │ │ ├── microservice_management_client/ │ │ │ └── test_microservice_management_client.py │ │ ├── storage_client/ │ │ │ ├── data/ │ │ │ │ ├── payload_aggregate1.json │ │ │ │ ├── payload_aggregate1_alias.json │ │ │ │ ├── payload_aggregate2.json │ │ │ │ ├── payload_aggregate2_alias.json │ │ │ │ ├── payload_aggregate3.json │ │ │ │ ├── payload_aggregate4.json │ │ │ │ ├── payload_aggregate5.json │ │ │ │ ├── payload_aggregate6.json │ │ │ │ ├── payload_aggregate6_alias.json │ │ │ │ ├── payload_aggregate7_alias.json │ │ │ │ ├── payload_aggregate_all.json │ │ │ │ ├── payload_aggregate_where.json │ │ │ │ ├── payload_and_where1.json │ │ │ │ ├── payload_and_where2.json │ │ │ │ ├── payload_and_where_isnull.json │ │ │ │ ├── payload_and_where_notnull.json │ │ │ │ ├── payload_complex_select1.json │ │ │ │ ├── payload_condition_in.json │ │ │ │ ├── payload_condition_not_in.json │ │ │ │ ├── payload_conditions1.json │ │ │ │ ├── payload_conditions2.json │ │ │ │ ├── payload_conditions3.json │ │ │ │ ├── payload_conditions4.json │ │ │ │ ├── payload_conditions5.json │ │ │ │ ├── payload_conditions6.json │ │ │ │ ├── payload_delete_where1.json │ │ │ │ ├── payload_distinct.json │ │ │ │ ├── payload_expr1.json │ │ │ │ ├── payload_expr2.json │ │ │ │ ├── payload_from1.json │ │ │ │ ├── payload_from2.json │ │ │ │ ├── payload_group_by1.json │ │ │ │ ├── payload_group_by1_alias.json │ │ │ │ ├── payload_group_by2.json │ │ │ │ ├── 
payload_group_by2_alias.json │ │ │ │ ├── payload_insert1.json │ │ │ │ ├── payload_join_with_query.json │ │ │ │ ├── payload_join_with_query_only_table_name.json │ │ │ │ ├── payload_join_without_query.json │ │ │ │ ├── payload_join_without_query_only_table_name.json │ │ │ │ ├── payload_json_properties1.json │ │ │ │ ├── payload_json_properties2.json │ │ │ │ ├── payload_limit1.json │ │ │ │ ├── payload_limit2.json │ │ │ │ ├── payload_limit_offset1.json │ │ │ │ ├── payload_limit_offset2.json │ │ │ │ ├── payload_modifier_set_where.json │ │ │ │ ├── payload_multiple_and_where_with_isnull.json │ │ │ │ ├── payload_multiple_and_where_with_notnull.json │ │ │ │ ├── payload_multiple_or_where_with_isnull.json │ │ │ │ ├── payload_multiple_or_where_with_notnull.json │ │ │ │ ├── payload_nested_join.json │ │ │ │ ├── payload_newer_condition.json │ │ │ │ ├── payload_offset1.json │ │ │ │ ├── payload_offset2.json │ │ │ │ ├── payload_older_condition.json │ │ │ │ ├── payload_or_where1.json │ │ │ │ ├── payload_or_where2.json │ │ │ │ ├── payload_or_where_isnull.json │ │ │ │ ├── payload_or_where_notnull.json │ │ │ │ ├── payload_order_by1.json │ │ │ │ ├── payload_order_by2.json │ │ │ │ ├── payload_order_by3.json │ │ │ │ ├── payload_select1.json │ │ │ │ ├── payload_select1_alias.json │ │ │ │ ├── payload_select2.json │ │ │ │ ├── payload_select2_alias.json │ │ │ │ ├── payload_select3_alias.json │ │ │ │ ├── payload_select_alias_with_timezone.json │ │ │ │ ├── payload_set1.json │ │ │ │ ├── payload_timebucket1.json │ │ │ │ ├── payload_timebucket2.json │ │ │ │ ├── payload_timebucket3.json │ │ │ │ ├── payload_timebucket4.json │ │ │ │ ├── payload_update_set_where1.json │ │ │ │ ├── payload_where_condition_isnull.json │ │ │ │ └── payload_where_condition_notnull.json │ │ │ ├── test_payload_builder.py │ │ │ ├── test_sc_exceptions.py │ │ │ ├── test_storage_client.py │ │ │ └── test_utils.py │ │ ├── test_alert_manager.py │ │ ├── test_audit_logger.py │ │ ├── test_common_utils.py │ │ ├── 
test_configuration_cache.py │ │ ├── test_configuration_manager.py │ │ ├── test_configuration_validation_helpers.py │ │ ├── test_logger.py │ │ ├── test_plugin_discovery.py │ │ ├── test_process.py │ │ ├── test_service_record.py │ │ ├── test_statistics.py │ │ └── web/ │ │ ├── test_middleware.py │ │ └── test_ssl_wrapper.py │ ├── plugins/ │ │ ├── common/ │ │ │ └── test_plugins_common_utils.py │ │ └── north/ │ │ └── common/ │ │ └── test_common.py │ ├── services/ │ │ ├── common/ │ │ │ ├── microservice_management/ │ │ │ │ └── test_instance.py │ │ │ ├── test_microservice.py │ │ │ └── test_services_common_utils.py │ │ └── core/ │ │ ├── api/ │ │ │ ├── certs/ │ │ │ │ ├── fledge.pem │ │ │ │ ├── fledge.txt │ │ │ │ ├── json/ │ │ │ │ │ └── test.json │ │ │ │ └── pem/ │ │ │ │ └── fledge.pem │ │ │ ├── control_service/ │ │ │ │ ├── test_acl_management.py │ │ │ │ ├── test_entrypoint.py │ │ │ │ ├── test_pipeline.py │ │ │ │ └── test_script_management.py │ │ │ ├── plugins/ │ │ │ │ ├── test_config_validator.py │ │ │ │ ├── test_discovery.py │ │ │ │ ├── test_install.py │ │ │ │ ├── test_remove.py │ │ │ │ └── test_update.py │ │ │ ├── support/ │ │ │ │ └── .gitkeep │ │ │ ├── test_alerts.py │ │ │ ├── test_api_utils.py │ │ │ ├── test_asset_tracker_api.py │ │ │ ├── test_audit.py │ │ │ ├── test_auth_mandatory.py │ │ │ ├── test_auth_optional.py │ │ │ ├── test_backup_restore.py │ │ │ ├── test_browser_assets.py │ │ │ ├── test_certificate_store.py │ │ │ ├── test_common_ping.py │ │ │ ├── test_configuration.py │ │ │ ├── test_filters.py │ │ │ ├── test_notification.py │ │ │ ├── test_package_log.py │ │ │ ├── test_scheduler_api.py │ │ │ ├── test_service.py │ │ │ ├── test_statistics_api.py │ │ │ ├── test_support.py │ │ │ └── test_task.py │ │ ├── asset_tracker/ │ │ │ └── test_asset_tracker.py │ │ ├── interest_registry/ │ │ │ ├── test_change_callback.py │ │ │ └── test_interest_registry.py │ │ ├── scheduler/ │ │ │ ├── test_scheduler.py │ │ │ ├── test_scheduler_entities.py │ │ │ └── test_scheduler_exceptions.py │ │ 
├── service_registry/ │ │ │ ├── test_exceptions.py │ │ │ ├── test_monitor.py │ │ │ └── test_service_registry.py │ │ ├── test_connect.py │ │ ├── test_main.py │ │ ├── test_server.py │ │ └── test_user_model.py │ └── tasks/ │ ├── purge/ │ │ ├── test_purge.py │ │ └── test_purge_main.py │ └── statistics/ │ ├── test_statistics_history.py │ └── test_statistics_main.py └── tests-manual/ ├── C/ │ └── services/ │ └── core/ │ ├── CMakeLists.txt │ ├── README │ ├── core_server.cpp │ ├── expected/ │ │ ├── 1 │ │ ├── 10 │ │ ├── 11 │ │ ├── 12 │ │ ├── 13 │ │ ├── 14 │ │ ├── 15 │ │ ├── 16 │ │ ├── 17 │ │ ├── 18 │ │ ├── 19 │ │ ├── 2 │ │ ├── 20 │ │ ├── 21 │ │ ├── 22 │ │ ├── 23 │ │ ├── 3 │ │ ├── 4 │ │ ├── 5 │ │ ├── 6 │ │ ├── 7 │ │ ├── 8 │ │ └── 9 │ ├── main.cpp │ ├── payloads/ │ │ ├── add_child_categories.json │ │ ├── create_category.json │ │ ├── create_category_a.json │ │ ├── create_category_b.json │ │ ├── create_category_update_b.json │ │ ├── create_category_with_values.json │ │ └── setvalue.json │ ├── testRunner.sh │ ├── testSetup.sh │ ├── test_configuration_manager.cpp │ └── testset └── debugger/ ├── .debugrc ├── README.rst ├── attach ├── buffer ├── commands ├── debug ├── detach ├── isolate ├── replay ├── resumeIngest ├── setBuffer ├── state ├── step ├── store └── suspendIngest ================================================ FILE CONTENTS ================================================ ================================================ FILE: .cursor/rules/C/core.mdc ================================================ --- description: Enforce C++11 coding standards for Fledge plugins. 
globs: ["*.cpp", "*.h"] alwaysApply: true author: "Devki Nandan Ghildiyal" --- ## Fledge Project Context - **Language Focus**: C++11 - **Primary Target**: South/North/Filter plugins - **Tech Stack** - **Languages** : C++11 - **Libraries** : GTest version 1.10.0, boost version 1.71 - **Database** : SQLite version 3, Postgres version 12 # Roles ## Senior Architect - Focus: System design, scalability, security, module boundaries, third-party integrations - Responsibilities: - Review requirements and suggest if any gap is there in the requirement. - Suggest design patterns, scalability strategies, and deployment models. - Validate alignment with NFRs (non-functional requirements). ## Senior Developer - Focus: Implementation, performance, maintainability, code quality - Responsibilities: - Review core logic, refactoring, and REST API contracts. - Ensure idiomatic usage of C++11 standards. - Enforce clean coding practices and SOLID principles. - Fledge plugin API and extension points - Filter pipeline structure and reading processing logic - South/North plugin lifecycle ## Senior QA Engineer - Focus: Test coverage, edge cases, negative scenarios, automation - Responsibilities: - Review requirements and prepare test plan - Validate test plans and coverage. - Suggest boundary tests, failure modes, stress scenarios. - Generate unit tests using GTest version 1.10.0 - Review unit test structure. You are playing **three roles** while reviewing, commenting, or helping with with deep knowledge of Fledge: 1. **Senior Architect** – Guide the system and module design. 2. **Senior Developer** – Evaluate the code quality and implementation. 3. **Senior QA Engineer** – Think from a test and validation standpoint. 
Respond with comments or suggestions **clearly labeled** by the role, e.g.: - `[Architect] Analyze requirements.md file to find out gaps in requirements if any.` - `[Developer] Use range-based loops.` - `[QA] Add unit tests for empty asset name.` ## Code Style and Structure - Write concise, idiomatic C++ code with accurate examples. - Follow modern C++11 conventions and best practices. - Use object-oriented, procedural, or functional programming patterns as appropriate. - Leverage STL and standard algorithms for collection operations. - Use descriptive variable and method names (e.g., 'isUserSignedIn', 'calculateTotal'). - Structure files into headers (*.h) and implementation files (*.cpp) with logical separation of concerns. ## Naming Conventions - Use PascalCase for class names. - Use camelCase for variable names and methods. - Use SCREAMING_SNAKE_CASE for constants and macros. - Prefix member variables with an m_ (e.g., `m_userId`). ## C++ Features Usage - Prefer modern C++11 features (e.g., auto, range-based loops). - Use `constexpr` and `const` to optimize compile-time computations. ## Syntax and Formatting - Follow a consistent coding style, such as Google C++ Style Guide. - Place braces on the same line for control structures and methods. - Use clear and consistent commenting practices. ## JSON Parsing - Fledge uses RapidJSON for JSON parsing by using C++ '*.h' files from '../../../C/thirdparty/rapidjson/include/rapidjson/' ## REST API Support - Fledge supports REST API by using C++ files from '../../../C/thirdparty/Simple-Web-Server' ## Error Handling and Validation - Use exceptions for error handling (e.g., `std::runtime_error`, `std::invalid_argument`). - Use RAII for resource management to avoid memory leaks. - Validate inputs at function boundaries. - Log errors using a logging class logger.h from '../../../C/common/include/logger.h' ## Performance Optimization - Avoid unnecessary heap allocations; prefer stack-based objects where possible. 
- Use `std::move` to enable move semantics and avoid copies. - Optimize loops with algorithms from `<algorithm>` (e.g., `std::sort`, `std::for_each`). - Profile and optimize critical sections with tools like Valgrind. ## Key Conventions - Do not use smart pointers. - Avoid global variables; use singletons sparingly. - Use `enum class` for strongly typed enumerations. - Separate interface from implementation in classes. - Use templates and metaprogramming judiciously for generic solutions. ## Testing - Write unit tests using frameworks like Google Test (GTest version 1.10.0). - Mock dependencies with libraries like Google Mock. - Implement integration tests for system components. ## Security - Use secure coding practices to avoid vulnerabilities (e.g., buffer overflows, dangling pointers). - Prefer `std::array` or `std::vector` over raw arrays. - Avoid C-style casts; use `static_cast`, `dynamic_cast`, or `reinterpret_cast` when necessary. - Enforce const-correctness in functions and member variables. ## Documentation - Write clear comments for classes, methods, and critical logic. - Use Doxygen for generating API documentation. - Document assumptions, constraints, and expected behavior of code. All public classes and methods must include Doxygen comments specifying assumptions, constraints, and expected input/output. Follow the official ISO C++ standards and guidelines for best practices in modern C++11 development. ## C++ south plugin development - Use 'plugins/south.mdc' ## C++ north plugin development - use 'plugins/north.mdc' ## C++ Filter plugin development - use 'plugins/filter.mdc' ## Log Levels Fledge supports 5 levels of logging, which can be considered in descending order of severity; fatal, error, warning, info and debug. Each of these has a defined use and a targeted audience. By default only the 3 most severe levels of log will be written and presented to the user.
| Log Level | Intended Audience | Usage | | :---- | :---- | :---- | | fatal | End-user | This is the most severe error level and is reserved for situations whereby the service that raises them can not continue. It is not for transient failures. | | error | End-user | Errors should be used when a transient issue prevents the service continuing in the short term, but may be recovered without the service restarting. | | warning | End-user | A warning message should be used if the user needs to be aware of some reduction in service or non-fatal issue that does not stop the flow of data. | | info | Code/Pipeline Developer | Informational messages should be used to give the user or developer more information as to the progress of a process or task, but does not impact the result of that task. It can be considered more as a progress tracking aid. | | debug | Code Developer | Debug messages are reserved for the code developers working on a plugin or core features of the Fledge services. | # Message Content For Logs - All log entries should be written to be human readable and standalone from the code that raises them. The reader of the log message should not need to have access to the source code in order to understand a log message. They should not include internal code references or variable names, but rather be descriptive regarding any variables printed in the log. - Log messages should not contain source file names, line numbers or function names as these have little to no meaning to the intended audience for the majority of log messages. These also take up valuable space that can be better used to give a more in-depth description of the issue. - Not only are the lengths of messages limited in syslog, but special characters such as new lines and carriage returns are mapped to hash codes and hence do not format correctly. Messages should not include such characters and should be simple strings.
================================================ FILE: .cursor/rules/C/plugins/filter.mdc ================================================ --- description: C++ Filter Plugin Architecture. globs: ["*.cpp", "*.h"] alwaysApply: true author: "Devki Nandan Ghildiyal" --- ## Filter Plugin - Filter plugin provides a mechanism to alter data as it flows from sensor to Fledge, or from Fledge to outside. ## General plugin guidelines - General guidelines to write a Fledge plugin is at '../../../docs/plugin_developers_guide/02_writing_plugins.rst' file ## Filter plugin Guidelines - Specific guidelines to write a filter plugin is at '../../../docs/plugin_developers_guide/06_filter_plugins.rst' ## Common support classes - Information about common support classes used by plugin is at '../../../docs/plugin_developers_guide/035_CPP.rst' ## Mutex and Locking - Thread Safety: The Fledge filter plugin can receive data (ingest()) and configuration changes (reconfigure()) simultaneously from different threads - Data Consistency: Prevents reading configuration while it's being modified - RAII Pattern: std::lock_guard automatically unlocks when going out of scope, preventing deadlocks - Following sample code demonstrates use of mutex and locks when doing ingestion ``` void ingest(std::vector<Reading *> *readings, std::vector<Reading *>& outReadings) { std::lock_guard<std::mutex> guard(m_configMutex); IngestData(readings, outReadings); readings->clear(); } ``` - Following sample code demonstrates use of mutex and locks when doing configuration changes ``` void reconfigure(const std::string& conf) { std::lock_guard<std::mutex> guard(m_configMutex); setConfig(conf); handleConfig(m_config); } ``` ## Implementation details of plugin - Filter plugin receives readings, alters them as required and passes them on in the pipeline - Common C++ classes used in Fledge framework are at following location '../../../C/common/include' and '../../../C/common/' - C++ class to handle reading in Fledge is at '../../../C/common/include/reading.h' - C++ class to handle
datapoint in Fledge is at '../../../C/common/include/datapoint.h' - C++ class to handle logging in Fledge is at '../../../C/common/include/logger.h' - C++ plugin must have a 'plugin.cpp' file - 'plugin.cpp' file must have plugin configuration and - Implementation of requirement of plugin is kept into a separate header and class implementation file which is used by 'plugin.cpp' file - Every plugin has 'docs' and 'tests' directory - 'plugin.cpp' must define plugin of the configuration ## Fledge plugin configuration Every Fledge plugin has a default configuration represented by a JSON. Following example demonstrates minimial configurtion for every plugin. configuration JSON for each plugin must have an elments called "plugin" ``` const char *default_config = QUOTE({ "plugin" : { "description" : "My example plugin in C++", "type" : "string", "default" : "MyPlugin", "readonly" : "true" } }); ``` - constant default_config is a string that contains the JSON configuration document. - QUOTE macro is used to manage JSON document easily - Configuation JSON documment will have multiple elements for each configuration item. - Fledge plugin supports following types | Type | Description | |:-----|:------------| |integer|An integer numeric value. The minimum and maximum properties may be used to control the limits of the values assigned to an integer.| |float|A floating point numeric item. The minimum and maximum properties may be used to control the limits of the values assigned to a float.| |string|An alpha-numeric array of characters that may contain any printable characters. The length property can be used to constrain the maximum length of the string.| |password|It is same as string type. User interfaces do not show this in plain text.| |boolean|A boolean value that can be assigned the values true or false.| |enumeration|The item can be assigned one of a fixed set of values. 
These values are defined in the options property of the item.| |list|A list of items, the items can be of type string, integer, float, enumeration or object. The type of the items within the list must all be the same, and this is defined via the items property of the list. A limit on the maximum number of entries allowed in the list can be enforced by use of the listSize property.| |kvlist|A key value pair list. The key is a string value always but the value of the item in the list may be of type string, enumeration, float, integer or object. The type of the values in the kvlist is defined by the items property of the configuration item. A limit on the maximum number of entries allowed in the list can be enforced by use of the listSize property.| |object|A complex configuration type with multiple elements that may be used within list and kvlist items only, it is not possible to have object type items outside of a list. Object type configuration items have a set of properties defined, each of which is itself a configuration item.| ## Example for integer type Sample configuration item "register" ``` "register" : { "description" : "The register number to read", "displayName" : "Register", "type" : "integer", "default" : "0", "order" : "1" } ``` ## Example for integer type Sample configuration item "temperature" ``` "temperature" : { "description" : "Temperate of PLC", "displayName" : "PLC Temperature", "type" : "float", "default" : "0", "order" : "2" } ``` ## Example for string type Sample configuration item "asset" ``` "asset" : { "description" : "The name of the asset the plugin will produce", "displayName" : "Asset Name", "type" : "string", "default" : "MyAsset", "order" : "3" } ``` ## Example of password type Sample configuration item "db_password" ``` "db_password" : { "description" : "Password of the database", "displayName" : "Database Password", "type" : "boolean", "default" : "MyAsset", "order" : "4" } ``` ## Example of boolean type Sample configuration item 
"apply_scaling" ``` "apply_scaling": { "description": "Option to apply scaling", "displayName": "Use Scaling" "type": "boolean", "default": "true", "order" : "5" } ``` ## Example of enumeration type Sample configuration item "authentication" ``` "authentication": { "description": "Server Authentication", "displayName": "Authentication", "type": "enumeration", "options": [ "mandatory", "optional" ], "default": "optional", "order" : "6" } ``` ## Example of list type Sample configuration item "tags" ``` "tags" : { "description" : "A set of tag names on which to operate", "displayName" : "Labels", "type" : "list", "items" : "string", "default" : "[ \"speed\", \"temperature\", \"voltage\" ]", "order" : "7" } ``` ## Example of kvlist type Sample configuration item "expressions" ``` "expressions" : { "description" : "A set of expressions used to evaluate and label data", "displayName" : "Labels", "type" : "kvlist", "items" : "string", "default" : "{\"idle\" : \"speed == 0\"}", "order" : "8" } ``` ## Example of object type Sample configuration item "map" ``` "map": { "description": "A list of datapoints to read and PLC register definitions", "type": "list", "items" : "object", "default": "[ { \"datapoint\" : \"speed\", \"register\" : \"10\", \"width\" : \"1\", \"type\" : \"integer\"} ]", "order" : "3", "displayName" : "PLC Map", "properties" : { "datapoint" : { "description" : "The name of the datapoint to create for the map entry", "displayName" : "Datapoint", "type" : "string", "default" : "datapoint" }, "register" : { "description" : "The register number to read", "displayName" : "Register", "type" : "integer", "default" : "0" }, "width" : { "description" : "Number of registers to read", "displayName" : "Width", "type" : "integer", "maximum" : "4", "default" : "1" }, "type" : { "description" : "The data type to read", "displayName" : "Data Type", "type" : "enumeration", "options" : [ "integer","float", "boolean" ], "default" : "integer" } } } ``` ## Supported Poperties 
by configuration items in configuration JSON document |Property|Description| |:-----|:------------| |default|The default value for the configuration item. This is always expressed as a string regardless of the type of the configuration item.| |deprecated|A boolean flag to indicate that this item is no longer used and will be removed in a future release.| |description|A description of the configuration item used in the user interface to give more details of the item. Commonly used as a mouse over help prompt.| |displayName|The string to use in the user interface when presenting the configuration item. Generally a more user friendly form of the item name. Item names are referenced within the code.| |items|The type of the items in a list or kvlist configuration item.| |length|The maximum length of the string value of the item.| |listSize|The maximum number of entries allowed in a list or kvlist item.| |mandatory|A boolean flag to indicate that this item can not be left blank.| |maximum|The maximum value for a numeric configuration item.| |minimum|The minimum value for a numeric configuration item.| |options|Only used for enumeration type elements. This is a JSON array of string that contains the options in the enumeration.| |order|Used in the user interface to give an indication of how high up in the dialogue to place this item.| |group|Used to group related items together. The main use of this is within the GUI which will turn each group into a tab in the creation and edit screens.| |readonly|A boolean property that can be used to include items that can not be altered by the API.| |rule|A validation rule that will be run against the value. This must evaluate to true for the new value to be accepted by the API| |type|The type of the configuration item. 
The list of types supported are; integer, float, string, password, enumeration, boolean, list, kvlist, JSON, URL, IPV4, IPV6, script, code, X509 certificate and northTask.| |validity|An expression used to determine if the configuration item is valid. Used in the UI to gray out one value based on the value of others.| |value|The current value of the configuration item. This is not included when defining a set of default configuration in, for example, a plugin.| |properties|A set of items that are used in list and kvlist type items to create a list of groups of configuration items.| |keyName|A display name to be used for entry and display of key in the key-value list type, with item being an object.| |keyDescription|A description of key value in the key-value list type, with item being an object.| |permissions|An array of user roles that are allowed to update this configuration item. If not given then the configuration item can be updated by any user. If the permissions property is included in a configuration item the array must have at least one entry.| ================================================ FILE: .cursor/rules/C/plugins/north.mdc ================================================ --- description: C++ North Plugin Architecture. globs: ["*.cpp", "*.h"] alwaysApply: true author: "Devki Nandan Ghildiyal" --- ## North Plugin - North plugin extracts data stored in Fledge and sends it outside of Fledge. - North plugin can send data to a server, a service in the cloud, or another Fledge instance. ## General plugin guidelines - General guidelines to write a Fledge plugin is at '../../../docs/plugin_developers_guide/02_writing_plugins.rst' file ## North plugin Guidelines - Specific guidelines to write a north plugin is at '../../../docs/plugin_developers_guide/04_north_plugins.rst' ## Persisting Data - Persistence feature can be implemented in the plugin to persist state between executions of the plugin.
- Guidelines to implement persistence feature is at '../../../docs/plugin_developers_guide/02_persisting_data.rst' ## Common support classes - Information about common support classes used by plugin is at '../../../docs/plugin_developers_guide/035_CPP.rst' ## Mutex and Locking - Thread Safety: The Fledge north plugin can send data (send()) and configuration changes (reconfigure()) simultaneously from different threads - Data Consistency: Prevents reading configuration while it's being modified - RAII Pattern: std::lock_guard automatically unlocks when going out of scope, preventing deadlocks - Following sample code demonstrates use of mutex and locks when sending data ``` void send(std::vector<Reading *> *readings, std::vector<Reading *>& outReadings) { std::lock_guard<std::mutex> guard(m_configMutex); sendData(readings, outReadings); readings->clear(); } ``` - Following sample code demonstrates use of mutex and locks when doing configuration changes ``` void reconfigure(const std::string& conf) { std::lock_guard<std::mutex> guard(m_configMutex); setConfig(conf); handleConfig(m_config); } ``` ## Implementation details of plugin - North plugin extracts data stored in Fledge and sends it to an external system - Common C++ classes used in Fledge framework are at following location '../../../C/common/include' and '../../../C/common/' - C++ class to handle reading in Fledge is at '../../../C/common/include/reading.h' - C++ class to handle datapoint in Fledge is at '../../../C/common/include/datapoint.h' - C++ class to handle logging in Fledge is at '../../../C/common/include/logger.h' - C++ plugin must have a 'plugin.cpp' file - 'plugin.cpp' file must have the plugin configuration - Implementation of requirement of plugin is kept in a separate header and class implementation file which is used by 'plugin.cpp' file - Every plugin has 'docs' and 'tests' directories - 'plugin.cpp' must define the plugin's default configuration ## Fledge plugin configuration Every Fledge plugin has a default configuration represented by a
JSON. Following example demonstrates minimial configurtion for every plugin. configuration JSON for each plugin must have an elments called "plugin" ``` const char *default_config = QUOTE({ "plugin" : { "description" : "My example plugin in C++", "type" : "string", "default" : "MyPlugin", "readonly" : "true" } }); ``` - constant default_config is a string that contains the JSON configuration document. - QUOTE macro is used to manage JSON document easily - Configuation JSON documment will have multiple elements for each configuration item. - Fledge plugin supports following types | Type | Description | |:-----|:------------| |integer|An integer numeric value. The minimum and maximum properties may be used to control the limits of the values assigned to an integer.| |float|A floating point numeric item. The minimum and maximum properties may be used to control the limits of the values assigned to a float.| |string|An alpha-numeric array of characters that may contain any printable characters. The length property can be used to constrain the maximum length of the string.| |password|It is same as string type. User interfaces do not show this in plain text.| |boolean|A boolean value that can be assigned the values true or false.| |enumeration|The item can be assigned one of a fixed set of values. These values are defined in the options property of the item.| |list|A list of items, the items can be of type string, integer, float, enumeration or object. The type of the items within the list must all be the same, and this is defined via the items property of the list. A limit on the maximum number of entries allowed in the list can be enforced by use of the listSize property.| |kvlist|A key value pair list. The key is a string value always but the value of the item in the list may be of type string, enumeration, float, integer or object. The type of the values in the kvlist is defined by the items property of the configuration item. 
A limit on the maximum number of entries allowed in the list can be enforced by use of the listSize property.| |object|A complex configuration type with multiple elements that may be used within list and kvlist items only, it is not possible to have object type items outside of a list. Object type configuration items have a set of properties defined, each of which is itself a configuration item.| ## Example for integer type Sample configuration item "register" ``` "register" : { "description" : "The register number to read", "displayName" : "Register", "type" : "integer", "default" : "0", "order" : "1" } ``` ## Example for integer type Sample configuration item "temperature" ``` "temperature" : { "description" : "Temperate of PLC", "displayName" : "PLC Temperature", "type" : "float", "default" : "0", "order" : "2" } ``` ## Example for string type Sample configuration item "asset" ``` "asset" : { "description" : "The name of the asset the plugin will produce", "displayName" : "Asset Name", "type" : "string", "default" : "MyAsset", "order" : "3" } ``` ## Example of password type Sample configuration item "db_password" ``` "db_password" : { "description" : "Password of the database", "displayName" : "Database Password", "type" : "boolean", "default" : "MyAsset", "order" : "4" } ``` ## Example of boolean type Sample configuration item "apply_scaling" ``` "apply_scaling": { "description": "Option to apply scaling", "displayName": "Use Scaling" "type": "boolean", "default": "true", "order" : "5" } ``` ## Example of enumeration type Sample configuration item "authentication" ``` "authentication": { "description": "Server Authentication", "displayName": "Authentication", "type": "enumeration", "options": [ "mandatory", "optional" ], "default": "optional", "order" : "6" } ``` ## Example of list type Sample configuration item "tags" ``` "tags" : { "description" : "A set of tag names on which to operate", "displayName" : "Labels", "type" : "list", "items" : "string", 
"default" : "[ \"speed\", \"temperature\", \"voltage\" ]", "order" : "7" } ``` ## Example of kvlist type Sample configuration item "expressions" ``` "expressions" : { "description" : "A set of expressions used to evaluate and label data", "displayName" : "Labels", "type" : "kvlist", "items" : "string", "default" : "{\"idle\" : \"speed == 0\"}", "order" : "8" } ``` ## Example of object type Sample configuration item "map" ``` "map": { "description": "A list of datapoints to read and PLC register definitions", "type": "list", "items" : "object", "default": "[ { \"datapoint\" : \"speed\", \"register\" : \"10\", \"width\" : \"1\", \"type\" : \"integer\"} ]", "order" : "3", "displayName" : "PLC Map", "properties" : { "datapoint" : { "description" : "The name of the datapoint to create for the map entry", "displayName" : "Datapoint", "type" : "string", "default" : "datapoint" }, "register" : { "description" : "The register number to read", "displayName" : "Register", "type" : "integer", "default" : "0" }, "width" : { "description" : "Number of registers to read", "displayName" : "Width", "type" : "integer", "maximum" : "4", "default" : "1" }, "type" : { "description" : "The data type to read", "displayName" : "Data Type", "type" : "enumeration", "options" : [ "integer","float", "boolean" ], "default" : "integer" } } } ``` ## Supported Poperties by configuration items in configuration JSON document |Property|Description| |:-----|:------------| |default|The default value for the configuration item. This is always expressed as a string regardless of the type of the configuration item.| |deprecated|A boolean flag to indicate that this item is no longer used and will be removed in a future release.| |description|A description of the configuration item used in the user interface to give more details of the item. Commonly used as a mouse over help prompt.| |displayName|The string to use in the user interface when presenting the configuration item. 
Generally a more user friendly form of the item name. Item names are referenced within the code.| |items|The type of the items in a list or kvlist configuration item.| |length|The maximum length of the string value of the item.| |listSize|The maximum number of entries allowed in a list or kvlist item.| |mandatory|A boolean flag to indicate that this item can not be left blank.| |maximum|The maximum value for a numeric configuration item.| |minimum|The minimum value for a numeric configuration item.| |options|Only used for enumeration type elements. This is a JSON array of string that contains the options in the enumeration.| |order|Used in the user interface to give an indication of how high up in the dialogue to place this item.| |group|Used to group related items together. The main use of this is within the GUI which will turn each group into a tab in the creation and edit screens.| |readonly|A boolean property that can be used to include items that can not be altered by the API.| |rule|A validation rule that will be run against the value. This must evaluate to true for the new value to be accepted by the API| |type|The type of the configuration item. The list of types supported are; integer, float, string, password, enumeration, boolean, list, kvlist, JSON, URL, IPV4, IPV6, script, code, X509 certificate and northTask.| |validity|An expression used to determine if the configuration item is valid. Used in the UI to gray out one value based on the value of others.| |value|The current value of the configuration item. 
This is not included when defining a set of default configuration in, for example, a plugin.| |properties|A set of items that are used in list and kvlist type items to create a list of groups of configuration items.| |keyName|A display name to be used for entry and display of key in the key-value list type, with item being an object.| |keyDescription|A description of key value in the key-value list type, with item being an object.| |permissions|An array of user roles that are allowed to update this configuration item. If not given then the configuration item can be updated by any user. If the permissions property is included in a configuration item the array must have at least one entry.| ================================================ FILE: .cursor/rules/C/plugins/south.mdc ================================================ --- description: C++ South Plugin Architecture. globs: ["*.cpp", "*.h"] alwaysApply: true author: "Devki Nandan Ghildiyal" --- ## South Plugin - South plugins are of two types: poll plugins and async plugins - Poll type plugin calls plugin_poll method at the defined interval to collect data from sensor. - Async type plugins use an incoming event from the device or a callback mechanism - SP_ASYNC flag is used to support async feature - Plugin interface version 1.0.0 is used to fetch single reading - Plugin interface version 2.0.0 is used to fetch multiple readings - South plugin can be used to exert control (Set Point Control) on the underlying device to which it is connected - SP_CONTROL flag is used to support Set Point Control feature ## General plugin guidelines - General guidelines to write a Fledge plugin is at '../../../docs/plugin_developers_guide/02_writing_plugins.rst' file ## South plugin Guidelines - Specific guidelines to write a south plugin is at '../../../docs/plugin_developers_guide/03_south_C_plugins.rst' ## Persisting Data - Persistence feature can be implemented in the plugin to persist state between executions of the plugin.
- SP_PERSIST_DATA flag is used to support persist data feature - Guidelines to implement persistance feature is at '../../../docs/plugin_developers_guide/02_persisting_data.rst' ## Common support classes - Information about common support classes used by plugin is at '../../../docs/plugin_developers_guide/035_CPP.rst' ## Mutex and Locking - Thread Safety: The fledge south plugin can receive data (ingest()) and configuration changes (reconfigure()) simultaneously from different threads - Data Consistency: Prevents reading configuration while it's being modified - RAII Pattern: std::lock_guard automatically unlocks when going out of scope, preventing deadlocks - Following sample code demonstrates use of mutex and locks when doing ingestion ``` void ingest(std::vector *readings, std::vector& outReadings) { std::lock_guard guard(m_configMutex); IngestData(readings, outReadings); readings->clear(); } ``` - Following sample code demonstrates use of mutex and locks when doing configuration changes ``` void reconfigure(const std::string& conf) { std::lock_guard guard(m_configMutex); setConfig(conf); handleConfig(m_config); } ``` ## Implementation details of plugin - South plugin fetches data from sensors or external sources and store in Fledge - Common C++ classes used in Fledge framework are at following location '../../../C/common/include' and '../../../C/common/' - C++ class to handle reading in Fledge is at '../../../C/common/include/reading.h' - C++ class to handle datapoint in Fledge is at '../../../C/common/include/datapoint.h' - C++ class to handle logging in Fledge is at '../../../C/common/include/logger.h' - C++ plugin must have a 'plugin.cpp' file - 'plugin.cpp' file must have plugin configuration and - Implementation of requirement of plugin is kept into a separate header and class implementation file which is used by 'plugin.cpp' file - Every plugin has 'docs' and 'tests' directory - 'plugin.cpp' must define plugin of the configuration ## Fledge plugin 
configuration Every Fledge plugin has a default configuration represented by a JSON. Following example demonstrates minimial configurtion for every plugin. configuration JSON for each plugin must have an elments called "plugin" ``` const char *default_config = QUOTE({ "plugin" : { "description" : "My example plugin in C++", "type" : "string", "default" : "MyPlugin", "readonly" : "true" } }); ``` - constant default_config is a string that contains the JSON configuration document. - QUOTE macro is used to manage JSON document easily - Configuation JSON documment will have multiple elements for each configuration item. - Fledge plugin supports following types | Type | Description | |:-----|:------------| |integer|An integer numeric value. The minimum and maximum properties may be used to control the limits of the values assigned to an integer.| |float|A floating point numeric item. The minimum and maximum properties may be used to control the limits of the values assigned to a float.| |string|An alpha-numeric array of characters that may contain any printable characters. The length property can be used to constrain the maximum length of the string.| |password|It is same as string type. User interfaces do not show this in plain text.| |boolean|A boolean value that can be assigned the values true or false.| |enumeration|The item can be assigned one of a fixed set of values. These values are defined in the options property of the item.| |list|A list of items, the items can be of type string, integer, float, enumeration or object. The type of the items within the list must all be the same, and this is defined via the items property of the list. A limit on the maximum number of entries allowed in the list can be enforced by use of the listSize property.| |kvlist|A key value pair list. The key is a string value always but the value of the item in the list may be of type string, enumeration, float, integer or object. 
The type of the values in the kvlist is defined by the items property of the configuration item. A limit on the maximum number of entries allowed in the list can be enforced by use of the listSize property.| |object|A complex configuration type with multiple elements that may be used within list and kvlist items only, it is not possible to have object type items outside of a list. Object type configuration items have a set of properties defined, each of which is itself a configuration item.| ## Example for integer type Sample configuration item "register" ``` "register" : { "description" : "The register number to read", "displayName" : "Register", "type" : "integer", "default" : "0", "order" : "1" } ``` ## Example for float type Sample configuration item "temperature" ``` "temperature" : { "description" : "Temperature of PLC", "displayName" : "PLC Temperature", "type" : "float", "default" : "0", "order" : "2" } ``` ## Example for string type Sample configuration item "asset" ``` "asset" : { "description" : "The name of the asset the plugin will produce", "displayName" : "Asset Name", "type" : "string", "default" : "MyAsset", "order" : "3" } ``` ## Example of password type Sample configuration item "db_password" ``` "db_password" : { "description" : "Password of the database", "displayName" : "Database Password", "type" : "password", "default" : "", "order" : "4" } ``` ## Example of boolean type Sample configuration item "apply_scaling" ``` "apply_scaling": { "description": "Option to apply scaling", "displayName": "Use Scaling", "type": "boolean", "default": "true", "order" : "5" } ``` ## Example of enumeration type Sample configuration item "authentication" ``` "authentication": { "description": "Server Authentication", "displayName": "Authentication", "type": "enumeration", "options": [ "mandatory", "optional" ], "default": "optional", "order" : "6" } ``` ## Example of list type Sample configuration item "tags" ``` "tags" : { "description" : "A set of tag 
names on which to operate", "displayName" : "Labels", "type" : "list", "items" : "string", "default" : "[ \"speed\", \"temperature\", \"voltage\" ]", "order" : "7" } ``` ## Example of kvlist type Sample configuration item "expressions" ``` "expressions" : { "description" : "A set of expressions used to evaluate and label data", "displayName" : "Labels", "type" : "kvlist", "items" : "string", "default" : "{\"idle\" : \"speed == 0\"}", "order" : "8" } ``` ## Example of object type Sample configuration item "map" ``` "map": { "description": "A list of datapoints to read and PLC register definitions", "type": "list", "items" : "object", "default": "[ { \"datapoint\" : \"speed\", \"register\" : \"10\", \"width\" : \"1\", \"type\" : \"integer\"} ]", "order" : "3", "displayName" : "PLC Map", "properties" : { "datapoint" : { "description" : "The name of the datapoint to create for the map entry", "displayName" : "Datapoint", "type" : "string", "default" : "datapoint" }, "register" : { "description" : "The register number to read", "displayName" : "Register", "type" : "integer", "default" : "0" }, "width" : { "description" : "Number of registers to read", "displayName" : "Width", "type" : "integer", "maximum" : "4", "default" : "1" }, "type" : { "description" : "The data type to read", "displayName" : "Data Type", "type" : "enumeration", "options" : [ "integer","float", "boolean" ], "default" : "integer" } } } ``` ## Supported Properties by configuration items in configuration JSON document |Property|Description| |:-----|:------------| |default|The default value for the configuration item. This is always expressed as a string regardless of the type of the configuration item.| |deprecated|A boolean flag to indicate that this item is no longer used and will be removed in a future release.| |description|A description of the configuration item used in the user interface to give more details of the item. 
Commonly used as a mouse over help prompt.| |displayName|The string to use in the user interface when presenting the configuration item. Generally a more user friendly form of the item name. Item names are referenced within the code.| |items|The type of the items in a list or kvlist configuration item.| |length|The maximum length of the string value of the item.| |listSize|The maximum number of entries allowed in a list or kvlist item.| |mandatory|A boolean flag to indicate that this item can not be left blank.| |maximum|The maximum value for a numeric configuration item.| |minimum|The minimum value for a numeric configuration item.| |options|Only used for enumeration type elements. This is a JSON array of string that contains the options in the enumeration.| |order|Used in the user interface to give an indication of how high up in the dialogue to place this item.| |group|Used to group related items together. The main use of this is within the GUI which will turn each group into a tab in the creation and edit screens.| |readonly|A boolean property that can be used to include items that can not be altered by the API.| |rule|A validation rule that will be run against the value. This must evaluate to true for the new value to be accepted by the API| |type|The type of the configuration item. The list of types supported are; integer, float, string, password, enumeration, boolean, list, kvlist, JSON, URL, IPV4, IPV6, script, code, X509 certificate and northTask.| |validity|An expression used to determine if the configuration item is valid. Used in the UI to gray out one value based on the value of others.| |value|The current value of the configuration item. 
This is not included when defining a set of default configuration in, for example, a plugin.| |properties|A set of items that are used in list and kvlist type items to create a list of groups of configuration items.| |keyName|A display name to be used for entry and display of key in the key-value list type, with item being an object.| |keyDescription|A description of key value in the key-value list type, with item being an object.| |permissions|An array of user roles that are allowed to update this configuration item. If not given then the configuration item can be updated by any user. If the permissions property is included in a configuration item the array must have at least one entry.| ================================================ FILE: .cursor/rules/README.md ================================================ # 🎯 How to Use Cursor Rules with AI Prompts This guide explains how to effectively use the Fledge Cursor rules for Python development and documentation in your AI prompts and development workflow. 
## 📁 Directory Structure Rules are organized for Python development and documentation: ``` .cursor/rules/ ├── C │   ├── core.mdc # Core C++ Standards + + platform requirements │   └── plugins │   ├── filter.mdc # C++ filter plugin rules │   ├── north.mdc # C++ north plugin rules │   └── south.mdc # C++ south plugin rules ├── README.md # This usage guide ├── python/ # Python-specific rules (Python 3.8.10-3.12, Ubuntu LTS 20.04+, Raspberry Pi) │ ├── core.mdc # Core Python standards + platform requirements │ ├── api.mdc # REST API + web framework dependencies │ ├── config.mdc # Configuration management + validation deps │ └── quality.mdc # Dependencies, logging, performance + requirements.txt ├── tests/ # Testing-specific rules │ └── python/ # Python testing rules │ ├── unit.mdc # Unit testing rules - pytest, coverage, best practices │ └── api.mdc # API integration testing rules - conftest fixtures, http.client patterns └── docs.mdc # Documentation guidelines ``` ## 📋 Available Rule Files | Rule File | Purpose | Applies To | |-----------|---------|------------| | `@C/core` | Core C++ standards,| `*.h`, `*.cpp` | | `@C/plugins/south` | C++ South Plugin| `*.h`, `*.cpp` | | `@C/plugins/north` | C++ North Plugin| `*.h`, `*.cpp` | | `@C/plugins/filter` | C++ Filter Plugin| `*.h`, `*.cpp` | | `@python/core` | Core Python standards, naming, imports | `*.py`, `python/**/*` | | `@python/api` | REST APIs, routes, middleware | API files, routes.py, web middleware | | `@python/config` | Configuration system, data formats | Config files, configuration modules | | `@python/quality` | Dependencies, logging, performance | Requirements files | | `@tests/python/unit` | Unit testing with pytest | Unit test files, test configuration | | `@tests/python/api` | API integration testing with http.client | API integration test files, conftest.py | | `@docs` | Documentation writing | `docs/**/*`, `*.rst` | ## 🏗️ Shared Platform & Dependencies All Python rules include consistent platform and 
dependency information: ### **Platform Requirements** (Built into all Python rules) - **C++ Standard**: C++11 - **Python Versions**: 3.8.10 - 3.12 (inclusive) - **Ubuntu**: LTS versions, 20.04 onwards (x86_64 & aarch64) - **Raspberry Pi OS**: Bullseye and Bookworm (aarch64 & armv7l) ### **Dependencies Management** (Referenced in all Python rules) - **[python/requirements.txt](python/requirements.txt)** - Runtime dependencies - **[python/requirements-dev.txt](python/requirements-dev.txt)** - Development dependencies - **[python/requirements-test.txt](python/requirements-test.txt)** - Testing dependencies ### **Automatic Context** (No need to repeat in prompts) When you use any `@python/*` rule, the AI automatically knows: ```bash # Instead of writing this every time: "Create a Python function that works on Python 3.8.10-3.12, Ubuntu LTS 20.04+, Raspberry Pi, uses requirements.txt for dependencies..." # You can simply write: @python/core "Create a Python function" # The AI already knows the platform and dependency constraints! ``` ## 🔄 Automatic Rule Application Cursor automatically applies rules based on the files you're working with: ```yaml # Example: Working on Python files automatically applies python/core rules python/fledge/services/core/server.py → @python/core rules active # Working on API files applies both core and API rules python/fledge/services/core/api/auth.py → @python/core + @python/api rules active # Documentation files apply docs rules docs/quick_start/installing.rst → @docs rules active ``` ## 🎯 Explicit Rule References in Prompts ### Direct Rule Invocation ``` @python/core Can you help me write a function that follows Fledge Python standards? 
@python/api I need to create a new REST endpoint for device management @docs Help me write documentation for this new feature ``` ### Multiple Rule References ``` @python/core @python/quality Help me refactor this code with proper error handling @python/api @python/config Create an API endpoint for configuration management @docs @python/api Document this REST API following both documentation and API standards @python/core @tests/python/unit Create a service class with comprehensive unit tests @tests/python/api @python/api Create API integration tests for new REST endpoints ``` ## 💡 Context-Aware Prompts ### When Working on Python Files ``` # Cursor automatically knows to apply Python rules "Create a new service class that handles sensor data processing" # The AI will automatically follow: - snake_case naming conventions - Type hints and docstrings - FLCoreLogger usage - Async/await patterns - Error handling standards - Python 3.8.10-3.12 compatibility ``` ### When Working on Documentation ``` # In docs/ directory, rules automatically apply "Document this new plugin API" # The AI will automatically: - Use reStructuredText format - Follow Sphinx conventions - Avoid "Fledge" in headings where possible - Include proper cross-references - Use correct heading hierarchy ``` ## 🛠️ Specific Rule-Based Requests ### Configuration Management ``` Using @python/config rules, create a configuration category for my new plugin with: - String, integer, and boolean parameters - Proper validation - Default values wrapped in quotes - Reserved category name checking ``` ### API Development ``` Following @python/api rules, create a REST endpoint that: - Handles role-based access through middleware - Returns camelCase JSON responses - Includes proper error handling - Checks for route conflicts - Uses FLCoreLogger for logging ``` ### Unit Testing ``` Using @tests/python/unit rules, create unit tests that: - Use pytest framework - Include proper mocking with pytest-mock - Test both success 
and failure cases - Follow the test file naming conventions - Include code coverage setup ``` ### API integration Testing ``` Using @tests/python/api rules, create API integration tests that: - Use http.client library exclusively (no requests) - Leverage conftest.py fixtures like reset_and_start_fledge - Test API endpoints with proper authentication - Use fledge_url and storage_plugin fixtures - Follow system test organization patterns ``` ### Documentation ``` Following @docs rules, create documentation that: - Uses reStructuredText format - Includes proper Sphinx directives - Avoids excessive "Fledge" branding - Has correct heading hierarchy - Includes cross-references to related docs ``` ## 🔀 Advanced Rule Usage ### API Documentation ``` Using @docs rules, create documentation for this Python API (@python/api) that includes proper Sphinx directives and avoids excessive Fledge branding. ``` ### Complete Feature Development ``` I'm creating a new Fledge service that includes: - Python backend (@python/core @python/api) - Configuration management (@python/config) - Unit testing (@tests/python/unit) - API integration testing (@tests/python/api) - Complete documentation (@docs) ``` ## 🔍 Rule-Aware Code Reviews ``` Review this code against @python/core and @python/quality rules: - Check naming conventions (snake_case vs camelCase) - Verify proper logging usage (FLCoreLogger) - Ensure type hints are present - Validate error handling patterns - Check Python version compatibility Review this test code against @tests/python/unit rules: - Validate pytest usage and fixture patterns - Check mocking strategies and test isolation - Ensure proper test organization and naming - Verify code coverage approach ``` ## 🚀 Platform-Specific Development ``` # Old way (verbose, repetitive): Using @python/core rules, help me optimize this code for: - Raspberry Pi ARM architecture (aarch64, armv7l) - Python 3.8.10-3.12 compatibility - Edge device memory constraints - Ubuntu LTS 20.04+ 
deployment # New way (automatic platform context): @python/core Optimize this code for edge device performance # The AI automatically knows: # - Python 3.8.10-3.12 compatibility # - Ubuntu LTS 20.04+ (x86_64 & aarch64) # - Raspberry Pi OS (aarch64 & armv7l) # - Edge device memory constraints # - Requirements.txt dependency management ``` ## 🐛 Troubleshooting with Rules ``` This code isn't following @python/api middleware patterns. Help me fix the authentication and role validation. This documentation doesn't follow @docs anti-branding guidelines. Help me remove excessive "Fledge" references while maintaining clarity. ``` ## 🔧 Pro Tips for Using Rules Effectively ### 1. Let Rules Work Automatically - Just open files in the appropriate directories - Cursor applies rules based on file patterns (globs) - No need to explicitly mention rules for basic tasks - Rules are automatically in context ### 2. Use Rule Names for Specific Guidance - When you need specific standards applied - When working across multiple technologies - When you want to ensure compliance with particular guidelines - When combining multiple rule sets ### 3. Combine Rules for Complex Tasks - Use multiple @ references for cross-cutting concerns - Leverage rule interactions (e.g., API + Config + Testing) - Apply domain-specific and quality rules together ### 4. Rule-Based Learning ``` Explain the difference between @python/core naming conventions and @python/api response formatting. How do @python/config validation rules work with @python/api endpoints? ``` ### 5. 
Validation Against Rules ``` Does this code follow @python/quality standards for: - Dependencies management - Logging practices - Performance optimization Does this testing code follow @tests/python/unit standards for: - pytest usage and fixtures - Mocking patterns - Test coverage - Unit testing best practices Does this API test follow @tests/python/api standards for: - http.client usage - conftest.py fixture usage - API testing patterns Validate this documentation against @docs standards for: - reStructuredText formatting - Sphinx directives - Cross-references - Branding guidelines ``` ## 📖 Rule-Specific Examples ### Python Core (@python/core) ``` Create a device manager class that: - Uses snake_case naming - Includes proper docstrings - Has type hints for all methods - Uses FLCoreLogger for logging - Follows the server.py architectural pattern ``` ### API Development (@python/api) ``` Create a REST endpoint for asset management that: - Uses role-based middleware validation - Returns camelCase JSON responses - Handles route conflicts - Includes proper error handling - Uses async/await patterns ``` ### Configuration (@python/config) ``` Design a configuration category that: - Includes string, integer, boolean, and JSON types - Has proper validation rules - Uses quoted default values - Avoids reserved category names - Includes optional validation constraints ``` ### Documentation (@docs) ``` Write API documentation that: - Uses reStructuredText format - Includes proper Sphinx directives - Avoids excessive "Fledge" branding - Has correct heading hierarchy - Includes cross-references to related docs ``` ### Unit Testing (@tests/python/unit) ``` Create comprehensive unit tests that: - Use pytest with proper fixtures - Mock external dependencies appropriately - Achieve meaningful test coverage - Follow unit testing best practices - Test both success and failure scenarios ``` ### API integration Testing (@tests/python/api) ``` Create API integration tests that: - Use 
http.client library exclusively - Leverage conftest.py fixtures for environment setup - Test API endpoints with authentication flows - Use reset_and_start_fledge for clean test environments - Follow system test organization patterns ``` ### Dependencies & Quality (@python/quality) ``` Manage dependencies and code quality: - Use requirements.txt for dependency management - Follow FLCoreLogger patterns for logging - Optimize for edge device performance - Ensure Python version compatibility - Document dependency constraints ``` ## 🎯 Best Practices Summary 1. **Trust Automatic Application**: Let Cursor apply rules based on file context 2. **Use @ References Explicitly**: When you need specific rule compliance 3. **Combine Rules Strategically**: For Python development with documentation 4. **Validate Against Rules**: Use rules for code review and quality checks 5. **Focus on Core Technologies**: Leverage Python and documentation rules together The rules work best when you let them guide development naturally - they'll automatically apply standards and catch issues as you code! ================================================ FILE: .cursor/rules/docs.mdc ================================================ --- description: "Cursor AI rules for Fledge documentation - covers reStructuredText, Sphinx, file naming, and content guidelines" globs: - "docs/**/*" - "*.rst" alwaysApply: false author: "Ashish Jabble" --- # Documentation Directory Cursor Rules ## Overview This file contains specific rules for working with documentation in the `/docs` directory of the Fledge project. These rules supplement the main project cursor rules and focus on documentation-specific patterns and conventions. 
## Documentation Framework - **Format**: reStructuredText (.rst) format exclusively - **Build System**: Sphinx documentation generator - **Theme**: sphinx_rtd_theme (Read the Docs theme) - **Configuration**: All settings in `docs/conf.py` ## 🚫 "Fledge" Branding Guidelines - MINIMIZE USAGE **CRITICAL RULE: Avoid "Fledge" in naming wherever possible** ### What to Avoid: - ❌ Image files: `fledge_architecture.png` - ❌ Directory names: `fledge_authentication/` - ❌ File names: `fledge_configuration.rst` - ❌ Headings: "Fledge Authentication Setup" - ❌ Repetitive content: "Fledge does this... Fledge provides that..." ### What to Use Instead: - ✅ Image files: `architecture_overview.png`, `auth_flow.png` - ✅ Directory names: `authentication/`, `configuration/`, `monitoring/` - ✅ File names: `authentication.rst`, `configuration.rst` - ✅ Headings: "Authentication Setup", "Configuration Guide" - ✅ Content alternatives: "the platform", "the system", "this feature" ### When "Fledge" IS Appropriate: - Main title pages and introductory content - External references and comparisons - Installation package names - API endpoint references where it's part of the actual name ## File Organization ### Directory Structure - `/docs/` - Main documentation root - `/docs/_static/` - Static assets (CSS, images that aren't content) - `/docs/_templates/` - Custom Sphinx templates - `/docs/images/` - Documentation images and screenshots - `/docs/quick_start/` - Getting started guides - `/docs/plugin_developers_guide/` - Plugin development documentation - `/docs/rest_api_guide/` - REST API documentation - `/docs/building_fledge/` - Build and installation guides - `/docs/monitoring/` - System monitoring documentation - `/docs/fledge-rule-DataAvailability/` - Built-in Data Availability rule plugin docs - `/docs/fledge-rule-Threshold/` - Built-in Threshold rule plugin docs - `/docs/fledge-north-OMF.rst` - Built-in OMF north plugin documentation - `/docs/keywords/` - Plugin categorization keywords and 
mappings - `/docs/fledge_plugins.rst` - Master plugin list with conditional hyperlinks **DIRECTORY NAMING GUIDELINES:** - **AVOID "fledge" in new directory names** - use functional descriptions - Use topic-based naming: `authentication/` instead of `fledge_authentication/` - Keep directory names lowercase with underscores - Focus on the purpose/feature rather than product branding ### File Naming Conventions - Use lowercase with underscores: `file_name.rst` - Index files: `index.rst` for each directory - Numbered files for version/download info: `91_version_history.rst`, `92_downloads.rst` - Descriptive names reflecting content: `securing.rst`, `troubleshooting_pi_server_integration.rst` - **AVOID "fledge" in filenames** - use functional descriptions: `authentication.rst` instead of `fledge_authentication.rst` - Focus on the topic/feature being documented ## reStructuredText Style Guidelines ### Heading Hierarchy Follow this exact hierarchy for consistency: ```rst *************** Document Title (Level 1) *************** =============== Major Section (Level 2) =============== Minor Section (Level 3) ----------------------- Subsection (Level 4) ^^^^^^^^^^^^^^^^^^^^ Sub-subsection (Level 5) """"""""""""""""""""""""" ``` **IMPORTANT NAMING CONVENTIONS:** - **AVOID "Fledge" in headings** unless absolutely necessary for context - Use descriptive, functional titles: "Authentication Configuration" instead of "Fledge Authentication Configuration" - Focus on the feature/functionality rather than the product name - Keep headings concise and user-focused ### Document Structure 1. **Title**: Use level 1 heading with asterisks above and below 2. **Introduction**: Brief overview of the document's purpose 3. **Table of Contents**: Use `.. toctree::` for sections with multiple pages 4. **Main Content**: Organized with appropriate heading levels 5. **Cross-references**: Link to related documentation ### Code Blocks ```rst .. 
code-block:: language :linenos: :emphasize-lines: 2,3 code here ``` ### Common Directives - `.. note::` - Important information - `.. warning::` - Critical warnings - `.. code-block::` - Code examples - `.. image::` - Images with proper alt text - `.. toctree::` - Table of contents trees ### Images and Media - Store images in `/docs/images/` directory - Use descriptive filenames: `architecture_overview.png` (avoid "fledge_" prefix) - **AVOID "Fledge" in image filenames** - use descriptive terms like `architecture_overview.png` instead of `fledge_architecture_overview.png` - Always include alt text: `.. image:: images/filename.png :alt: Description` - Optimize images for web (reasonable file sizes) - Use subdirectories in images/ for organization by topic - Keep image names concise and topic-focused ### Cross-References and Links - Internal references: `:doc:`filename`` or `:ref:`label`` - External links: `Link text `_ - API references: Follow existing patterns for REST API documentation ## Content Guidelines ### Writing Style - Use clear, concise language suitable for technical documentation - Write in active voice when possible - Use present tense for current functionality - Include step-by-step instructions for procedures - Provide context and examples - **MINIMIZE use of "Fledge" in content** - focus on functionality and features - Use "the platform", "the system", or specific feature names instead of repetitive "Fledge" references ### Code Examples - Include complete, runnable examples when possible - Show both input and expected output - Use realistic data that represents actual Fledge usage - Comment code examples appropriately - Test code examples to ensure they work ### API Documentation - Document all public APIs, parameters, and return values - Include HTTP status codes for REST APIs - Provide curl examples for API endpoints - Show JSON request/response examples - Document error conditions and responses ### Configuration Documentation - Show complete 
configuration examples - Explain all configuration parameters - Provide default values where applicable - Include configuration validation rules - Link to related configuration sections ## Sphinx Configuration ### Extensions - Keep extensions minimal and focused - Document any new extensions added - Ensure extensions are available in build environment ### Build Process - Use `make html` for local builds - Check for build warnings and errors - Test documentation locally before committing - Verify all links work correctly ### Documentation Generation Scripts - **Location**: `/scripts/` directory contains documentation generation utilities - **Plugin Discovery**: Scripts automatically scan plugin repositories for `docs/` directories - **Content Aggregation**: Pulls documentation from external plugin repos during build - **Branch Management**: Handles DOCBRANCH parameter for version-specific documentation - **Integration**: Merges external plugin docs with core Fledge documentation seamlessly ### Keywords and Categorization System - **Keywords Directory**: `/docs/keywords/` contains category definition files - **Category Mapping**: Each keyword file defines a plugin category (e.g., `Augmentation`, `Cleansing`, `Cloud`) - **Plugin Keywords**: Plugin repositories contain keyword files that reference category keywords - **Automatic Categorization**: Build scripts match plugin keywords with category definitions - **Dynamic Organization**: Plugin list automatically organized into categorical sections - **Conditional Display**: Categories only appear if plugins with matching keywords exist ### Version Management - Version information managed in `conf.py` - DOCBRANCH parameter for plugin documentation - Update version info during releases ### DOCBRANCH System - **Purpose**: Generates documentation from both core Fledge and external plugin repositories - **Core Documentation**: Always included from the main Fledge repository - **Plugin Documentation**: Pulled from individual 
plugin repositories if they have a `docs/` directory - **Branch Control**: Uses `DOCBRANCH='develop'` parameter (set to actual version during releases) - **Auto-Discovery**: Only includes plugins that have documentation - ignores repos without `docs/` directory - **Generation Scripts**: Located in `/scripts/` directory handle the plugin documentation aggregation - **Build Command**: `subprocess.run(["make generated DOCBRANCH='develop'"], shell=True, check=True)` in `conf.py` ## Plugin Documentation ### Plugin Repository Documentation - **External Plugins**: Each plugin repository can have its own `docs/` directory - **Auto-Discovery**: Build system automatically includes plugin docs if `docs/` directory exists - **Repository Requirement**: Plugin repos without `docs/` directory are ignored during documentation generation - **Branch Synchronization**: Uses same DOCBRANCH parameter as core documentation - **Integration**: Plugin docs are seamlessly integrated into the main documentation site ### Built-in Plugins (In Core Repository) The following plugins have documentation included directly in the core Fledge repository: - **`fledge-rule-DataAvailability/`** - Data availability rule plugin documentation - **`fledge-rule-Threshold/`** - Threshold rule plugin documentation - **`fledge-north-OMF.rst`** - OMF north plugin documentation ### Plugin Documentation Standards - Each plugin should have its own documentation section - Follow the pattern established in existing plugin docs - Include installation, configuration, and usage instructions - Provide troubleshooting sections - Use the same reStructuredText format and style guidelines ### Auto-Generated Content - Plugin lists and references may be auto-generated by scripts in `/scripts/` directory - Don't manually edit generated content - Use the build system's generation capabilities - Generated content includes plugin discovery from external repositories ### Plugin Listing System (`fledge_plugins.rst`) - **Master 
List**: All plugins are listed with name and description in `fledge_plugins.rst` - **Smart Hyperlinking**: - ✅ **With Documentation**: Plugin names become hyperlinks if `docs/` directory exists in plugin repo - ❌ **Without Documentation**: Plugin names remain as plain text (no hyperlink) - **Automatic Detection**: Build system checks for documentation availability during generation - **Comprehensive Coverage**: Includes all available Fledge plugins regardless of documentation status ### Plugin Categorization System - **Keyword-Based Organization**: Plugins organized by categories using keyword mapping - **Keywords Directory**: `/docs/keywords/` contains category definitions and mappings - **Plugin Keywords**: Each plugin repository can have a keywords file defining its categories - **Categorical Display**: Plugins grouped and displayed under appropriate category sections - **Dynamic Categorization**: Categories are automatically generated based on available keywords ### Plugin Documentation Sources - **Core Repository Plugins**: Documentation in `/docs/` for built-in plugins - **External Plugin Repos**: Each plugin repository can maintain its own `docs/` directory - **Plugin Directory Reference**: All Fledge-based plugins available in main `plugins/` directory - **Detailed Documentation**: Comprehensive plugin docs when `docs/` directory exists in plugin repo ### Plugin Documentation Workflow 1. **Plugin Discovery**: Build system scans all available Fledge plugin repositories 2. **Documentation Check**: Determines if plugin repo has `docs/` directory 3. **List Generation**: All plugins added to `fledge_plugins.rst` with name and description 4. **Hyperlink Decision**: - Plugins WITH docs → Name becomes clickable hyperlink - Plugins WITHOUT docs → Name remains as plain text 5. **Category Organization**: Plugins grouped by keywords into categorical sections 6. 
**Integration**: Plugin docs seamlessly integrated into main documentation site ## Quality Standards ### Content Review - Ensure accuracy of all technical information - Verify code examples work with current Fledge version - Check that screenshots are current and accurate - Review for clarity and completeness ### Accessibility - Use proper heading hierarchy for screen readers - Include alt text for all images - Ensure good color contrast in custom CSS - Test with accessibility tools ### Maintenance - Update documentation when features change - Remove or update deprecated information - Keep external links current - Regular review of troubleshooting sections ## Build and Deployment ### Local Testing ```bash cd docs make html # Check _build/html/index.html in browser ``` ### Build Warnings - Address all Sphinx build warnings - Fix broken internal references - Verify external links periodically - Check image references ### Dependencies - Document build dependencies in `requirements.txt` - Keep Sphinx version constraints appropriate - Test builds in clean environments ## Documentation Contribution Guidelines ### New Documentation - Create comprehensive documentation for new features - Follow existing patterns and conventions - Include in appropriate toctree structures - Add cross-references to related content ### Plugin Documentation Contributions - **Adding Plugin Docs**: Create `docs/` directory in plugin repository with proper structure - **Hyperlink Generation**: Plugin names in `fledge_plugins.rst` automatically become hyperlinks when docs exist - **Keywords Assignment**: Add appropriate keyword files to enable categorical organization - **Content Standards**: Follow same reStructuredText standards as core documentation - **Testing**: Verify plugin documentation builds correctly with main documentation site ### Updates - Update documentation when code changes - Maintain backwards compatibility information - Add migration guides for breaking changes - Update version 
history appropriately ### Review Process - Technical accuracy review - Editorial review for clarity - Build verification - Link checking ## Common Patterns ### Getting Started Guides - Step-by-step instructions - Prerequisites clearly stated - Expected outcomes described - Troubleshooting section included ### Reference Documentation - Comprehensive parameter listings - Example configurations - Default values documented - Related settings cross-referenced ### Tutorial Content - Progressive complexity - Complete working examples - Clear learning objectives - Summary and next steps ## Troubleshooting Documentation ### Error Messages - Include exact error message text - Provide context for when errors occur - Give specific resolution steps - Link to related configuration ### Common Issues - Document frequently reported problems - Provide multiple solution approaches - Include preventive measures - Reference community resources This documentation should be treated as living guidelines that evolve with the project's needs while maintaining consistency and quality standards. 
================================================ FILE: .cursor/rules/python/api.mdc ================================================ --- description: "Python API development rules for Fledge - REST APIs, routes, middleware, and web services" globs: "python/fledge/services/*/api/**/*,python/fledge/*/routes.py,python/fledge/common/web/**/*" alwaysApply: false author: "Ashish Jabble" --- # Python API Development Rules ## Python Version & Platform Requirements ### Supported Python Versions - **Target Range**: Python 3.8.10 - 3.12 (inclusive) - Always test API compatibility across the full supported range ### Deployment Platforms - **Ubuntu**: LTS versions, 20.04 onwards - **Architectures**: x86_64 & aarch64 - **Raspberry Pi OS**: Bullseye and Bookworm distributions - **Architectures**: aarch64 & armv7l ### Dependencies Management - **Runtime Dependencies**: [python/requirements.txt](mdc:python/requirements.txt) - Include aiohttp, web framework deps - **Development Dependencies**: [python/requirements-dev.txt](mdc:python/requirements-dev.txt) - **Testing Dependencies**: [python/requirements-test.txt](mdc:python/requirements-test.txt) - Include pytest-aiohttp - **Use exact versions** for production dependencies - **Consider ARM compatibility** for Raspberry Pi deployment - **Test across Python versions** and target architectures ## REST API Development ### API Route Definition & Management - **Core Service Routes**: Defined in [python/fledge/services/core/routes.py](mdc:python/fledge/services/core/routes.py) - **Microservice Routes**: Exposed at [python/fledge/services/common/microservice_management/routes.py](mdc:python/fledge/services/common/microservice_management/routes.py) - **API Handlers**: Follow existing patterns in [python/fledge/services/core/api/](mdc:python/fledge/services/core/api/) ### Route Definition Best Practices ```python # Example route definition with handler from aiohttp import web from fledge.services.core.api import my_handler def 
setup_routes(app): """Setup API routes for the service.""" # Check for existing routes to avoid conflicts (keyed by method AND path, since the same path may carry multiple HTTP methods) existing_routes = [(route.method, route.resource.canonical) for route in app.router.routes()] # Define new routes with conflict checking new_routes = [ ('GET', '/fledge/my_endpoint', my_handler.get_data), ('POST', '/fledge/my_endpoint', my_handler.create_data), ('PUT', '/fledge/my_endpoint/{id}', my_handler.update_data), ('DELETE', '/fledge/my_endpoint/{id}', my_handler.delete_data) ] for method, path, handler in new_routes: # Check for route conflicts before adding if (method, path) in existing_routes: raise ValueError(f"Route conflict: {method} {path} already exists") app.router.add_route(method, path, handler) ``` ### Route Conflict Prevention - **Endpoint Uniqueness**: Ensure each endpoint path is unique across the application - **Method Specificity**: Same path can have different HTTP methods (GET, POST, PUT, DELETE) - **Conflict Detection**: Check existing routes before adding new ones - **Route Inspection**: Use `app.router.routes()` to inspect existing routes - **Naming Conventions**: Use descriptive, hierarchical path naming ### Route Organization Guidelines ```python # Core service routes (routes.py) CORE_ROUTES = [ # System management '/fledge/ping', '/fledge/shutdown', '/fledge/restart', # Configuration management '/fledge/category', '/fledge/category/{category_name}', # Plugin management '/fledge/plugins', '/fledge/plugins/{plugin_type}', # Service management '/fledge/service', '/fledge/service/{service_name}' ] # Microservice routes (microservice_management/routes.py) MICROSERVICE_ROUTES = [ # Service '/fledge/service/register', '/fledge/service/unregister', '/fledge/service/ping', '/fledge/service/shutdown', # Health monitoring '/fledge/service/health', '/fledge/service/status' ] ``` ### API Handler Implementation ```python # Example API handler with proper structure from aiohttp import web from fledge.common.logger import FLCoreLogger import json _logger = 
FLCoreLogger().get_logger(__name__) async def get_data_handler(request): """Handle GET requests for data retrieval.""" try: # Extract parameters param_id = request.match_info.get('id') query_params = request.query # Validate input if param_id and not param_id.isdigit(): raise web.HTTPBadRequest(reason="Invalid ID parameter") # Process request result = await process_data_request(param_id, query_params) # Return consistent JSON response (camelCase keys) response = { "message": "Data retrieved successfully", "data": result, "count": len(result) if isinstance(result, list) else 1 } return web.json_response(response, status=200) except web.HTTPException: raise except ValueError as ex: _logger.error(ex, "Invalid input parameter") return web.json_response({"message": str(ex)}, status=400) except Exception as ex: _logger.error(ex, "Failed to retrieve data") return web.json_response({"message": "Internal server error"}, status=500) ``` ### Middleware-Based Security & Role Validation Fledge implements endpoint security and role validation through middleware: - **Middleware Location**: [python/fledge/common/web/middleware.py](mdc:python/fledge/common/web/middleware.py) - **Role-Based Access Control**: All endpoint role restrictions handled in middleware - **Centralized Validation**: Security logic centralized in middleware layer - **Automatic Enforcement**: Middleware automatically validates roles for protected endpoints ### Role Validation Implementation ```python # Role validation is handled in middleware.py, not in individual handlers # The middleware intercepts requests and validates user roles before reaching handlers # Example of how middleware handles role validation: async def role_validation_middleware(request, handler): """Middleware that validates user roles for protected endpoints.""" # Extract endpoint information endpoint = request.path method = request.method # Check if endpoint requires specific roles required_roles = get_endpoint_roles(endpoint, method) if required_roles: # Validate user 
authentication and roles user_roles = await get_user_roles(request) if not any(role in user_roles for role in required_roles): return web.json_response( {"message": "Insufficient permissions"}, status=403 ) # Continue to handler if validation passes return await handler(request) ``` ### Endpoint Role Configuration Role requirements for endpoints are configured and enforced by middleware: ```python # Middleware handles role mapping for different endpoints ENDPOINT_ROLES = { # Administrative operations - admin only '/fledge/shutdown': ['admin'], '/fledge/restart': ['admin'], '/fledge/service': ['admin'], '/fledge/certificate': ['admin'], # Configuration management - admin and editor '/fledge/configuration': ['admin', 'editor'], '/fledge/plugins': ['admin', 'editor'], '/fledge/category': ['admin', 'editor'], # Control operations - admin, editor, and control '/fledge/schedule': ['admin', 'editor', 'control'], '/fledge/notification': ['admin', 'editor', 'control'], '/fledge/control/pipeline': ['admin', 'editor', 'control'], '/fledge/control/script': ['admin', 'editor', 'control'], # Data viewing - admin, editor, control, data-view, and view '/fledge/asset': ['admin', 'editor', 'control', 'data-view', 'view'], '/fledge/reading': ['admin', 'editor', 'control', 'data-view', 'view'], '/fledge/statistics': ['admin', 'editor', 'control', 'data-view', 'view'], # General viewing - all authenticated roles '/fledge/ping': ['admin', 'editor', 'control', 'data-view', 'view'], '/fledge/health': ['admin', 'editor', 'control', 'data-view', 'view'], '/fledge/audit': ['admin', 'editor', 'control', 'data-view', 'view'], # Public endpoints - no authentication required '/fledge/login': [], '/fledge/logout': [] } # Role Hierarchy (from most to least privileged): # admin - Full system access, can modify everything # editor - Can modify configurations and data processing # control - Can manage control operations and pipelines # data-view - Can view data and readings but not modify # view - 
Basic viewing access, limited data access # Middleware automatically enforces these role requirements # No need to implement role checking in individual handlers ``` ### Security Best Practices for Middleware - **Centralized Role Management**: All role validation handled in middleware.py - **Handler Independence**: Handlers focus on business logic, not security - **Consistent Enforcement**: Same security rules applied across all endpoints - **Configuration-Based**: Role requirements configured, not hardcoded - **Audit Trail**: Middleware logs all security validation attempts - **Error Handling**: Proper HTTP status codes for authorization failures ### REST API Standards - Use proper HTTP status codes (200, 201, 400, 401, 403, 404, 500) - Implement proper authentication/authorization through middleware - Validate all input parameters thoroughly - Return consistent JSON response formats (camelCase for API keys) - Include comprehensive error handling and logging - Follow RESTful conventions for HTTP methods and endpoints - Apply security middleware to all protected endpoints - Use role-based access control for sensitive operations ## Microservice Communication - Use the microservice management client from [python/fledge/common/microservice_management_client/](mdc:python/fledge/common/microservice_management_client/) - Handle service discovery properly - Implement proper retry logic with exponential backoff - Use appropriate timeouts for service calls ## Async/Await Patterns - Use async/await for I/O operations (database, HTTP, file operations) - Properly handle asyncio event loops - Use aiohttp for HTTP client operations - Follow existing async patterns in the codebase - Be mindful of blocking operations in async contexts ================================================ FILE: .cursor/rules/python/config.mdc ================================================ --- description: "Python configuration and data management rules for Fledge - config system, reading objects, and 
data types" globs: "python/fledge/common/configuration*,python/fledge/services/*/configuration*,**/config*.py" alwaysApply: false author: "Ashish Jabble" --- # Python Configuration & Data Management Rules ## Python Version & Platform Requirements ### Supported Python Versions - **Target Range**: Python 3.8.10 - 3.12 (inclusive) - Always test configuration handling across the full supported range ### Deployment Platforms - **Ubuntu**: LTS versions, 20.04 onwards - **Architectures**: x86_64 & aarch64 - **Raspberry Pi OS**: Bullseye and Bookworm distributions - **Architectures**: aarch64 & armv7l ### Dependencies Management - **Runtime Dependencies**: [python/requirements.txt](mdc:python/requirements.txt) - Include config validation deps - **Development Dependencies**: [python/requirements-dev.txt](mdc:python/requirements-dev.txt) - **Testing Dependencies**: [python/requirements-test.txt](mdc:python/requirements-test.txt) - Include config testing tools - **Use exact versions** for production dependencies - **Consider ARM compatibility** for Raspberry Pi deployment - **Test across Python versions** and target architectures ## Configuration Management ### Configuration System - Use the Fledge configuration management system consistently - Store configuration in JSON format - Validate configuration parameters thoroughly - Provide sensible defaults for all configuration options - Handle configuration changes gracefully ### Configuration Categories & Types Fledge configuration manager supports various types with specific formatting requirements: ```python # Example configuration category definitions config_category = { "string_example": { "description": "A string configuration parameter", "type": "string", "default": "default_value", # Always wrap in quotes "value": "current_value" # Always wrap in quotes }, "integer_example": { "description": "An integer configuration parameter", "type": "integer", "default": "42", # Wrap in quotes but validate as integer "value": "100" # 
Wrap in quotes but validate as integer }, "boolean_example": { "description": "A boolean configuration parameter", "type": "boolean", "default": "false", # Wrap in quotes: "true" or "false" "value": "true" # Wrap in quotes: "true" or "false" }, "float_example": { "description": "A float configuration parameter", "type": "float", "default": "3.14", # Wrap in quotes but validate as float "value": "2.71" # Wrap in quotes but validate as float }, "enumeration_example": { "description": "An enumeration configuration parameter", "type": "enumeration", "options": ["option1", "option2", "option3"], "default": "option1", # Wrap in quotes, must be from options "value": "option2" # Wrap in quotes, must be from options }, "JSON_example": { "description": "A JSON object configuration parameter", "type": "JSON", "default": "{\"key\": \"value\"}", # JSON string wrapped in quotes "value": "{\"key\": \"updated\"}" # JSON string wrapped in quotes } } ``` ### Supported Configuration Types Fledge configuration manager supports the following types: - **string**: Text values (default type if not specified) - **integer**: Whole numbers - **float**: Decimal numbers - **boolean**: True/false values ("true"/"false" as strings) - **enumeration**: Predefined list of options - **JSON**: Complex objects or arrays - **password**: Encrypted/masked string values - **X509 certificate**: Certificate data - **code**: Editable code blocks (Python, JavaScript, etc.) 
### Optional Configuration Items Configuration parameters can include optional properties: ```python config_with_optional_items = { "advanced_parameter": { "description": "An advanced configuration parameter", "type": "integer", "default": "100", "value": "150", # Optional items "minimum": "1", # Minimum allowed value "maximum": "1000", # Maximum allowed value "length": "10", # Maximum string length (for string types) "rule": "value > 0", # Validation rule expression "order": "10", # Display order in UI "readonly": "false", # Whether value can be modified "options": ["opt1", "opt2"], # Available options (for enumeration) "displayName": "Advanced Parameter", # UI display name "validity": "valid", # Validation status "group": "advanced" # Grouping for UI organization } } ``` ### Reserved Configuration Categories Certain category names are reserved by the Fledge configuration manager: ```python # Reserved category names - DO NOT USE for custom configurations RESERVED_CATEGORIES = [ "General", # Core system configuration "Advanced", # Advanced system settings "Utilities", # System utilities configuration "Security", # Security-related settings "rest_api", # REST API configuration "service", # Service-level configuration "storage", # Storage plugin configuration "scheduler", # Task scheduler configuration "dispatcher", # Dispatcher service configuration "logging", # Logging configuration "authentication", # Authentication settings "authorization", # Authorization settings "certificate_store", # Certificate management "audit", # Audit logging configuration "performance" # Performance monitoring settings ] # Use descriptive, specific names for plugin/service configurations # Good examples: GOOD_CATEGORY_NAMES = [ "modbus_tcp", # Specific protocol/plugin name "opcua_client", # Specific functionality "http_north", # Service type and direction "pi_web_api", # Specific target system "temperature_filter", # Specific filter purpose ] ``` ### Configuration Value Handling Rules - 
**Quote Wrapping**: Always wrap `default` and `value` in double quotes (`""`) - **Type Validation**: Explicitly validate values against their declared type - **Type Conversion**: Convert string values to appropriate types during processing - **Validation Examples**: ```python def validate_config_value(value: str, config_type: str) -> bool: """Validate configuration value against its type.""" try: if config_type == "integer": int(value) # Must be convertible to int elif config_type == "float": float(value) # Must be convertible to float elif config_type == "boolean": return value.lower() in ["true", "false"] elif config_type == "JSON": json.loads(value) # Must be valid JSON # string and enumeration are validated separately return True except (ValueError, json.JSONDecodeError): return False ``` ### Reading Object Format Fledge uses a specific reading object format for sensor data: ```python # Standard Fledge reading object structure reading_object = { "asset": "sensor_name", # Asset name (string) "timestamp": "2024-01-01 12:00:00.000", # ISO timestamp with milliseconds "readings": { # Dictionary of sensor readings "temperature": 25.5, # Numeric values (int/float) "humidity": 60.2, "pressure": 1013.25, "status": "online" # String values allowed } } # Multiple readings in a single object reading_batch = [ { "asset": "sensor01", "timestamp": "2024-01-01 12:00:00.000", "readings": {"temperature": 25.5, "humidity": 60.2} }, { "asset": "sensor02", "timestamp": "2024-01-01 12:00:01.000", "readings": {"pressure": 1013.25, "wind_speed": 12.3} } ] ``` ### Configuration Best Practices - **Descriptive Names**: Use clear, descriptive configuration parameter names - **Validation**: Always validate configuration values against their types - **Error Handling**: Provide clear error messages for invalid configurations - **Documentation**: Include comprehensive descriptions for all parameters - **Defaults**: Provide sensible defaults that work in most environments - **Type Safety**: 
Ensure type conversion is handled safely with proper error checking - **Reading Format**: Follow standard Fledge reading object structure for sensor data - **Reserved Categories**: Avoid using reserved category names for custom configurations - **Optional Items**: Use optional configuration properties for advanced validation and UI control - **Type Selection**: Choose appropriate types (use "password" for sensitive data, "code" for scripts) - **Validation Rules**: Implement proper minimum/maximum constraints and custom validation rules ## Database Operations - Use the storage service abstraction from [python/fledge/common/storage_client/](mdc:python/fledge/common/storage_client/) - Handle database connection errors gracefully - Use prepared statements for SQL queries - Consider performance for large datasets - Implement proper connection pooling ## Error Handling - Use specific exception types rather than generic Exception - Log errors with appropriate severity levels using Fledge logging framework - Handle async operations properly with try/catch blocks - Return meaningful error messages for API responses - Consider edge computing constraints when handling errors ## Security Considerations - Validate all input parameters to prevent injection attacks - Use secure defaults for configuration - Handle certificates and keys securely - Follow authentication/authorization patterns - Sanitize data before database operations - Never log sensitive information ================================================ FILE: .cursor/rules/python/core.mdc ================================================ --- description: "Core Python development rules for Fledge - code style, imports, file structure, and naming conventions" globs: "*.py,python/**/*" alwaysApply: false author: "Ashish Jabble" --- # Python Core Development Rules ## Python Version & Platform Requirements ### Supported Python Versions - **Minimum**: Python 3.8.10 - **Maximum**: Python 3.12 - **Target Range**: Python 3.8.10 - 
3.12 (inclusive) - Always test compatibility across the full supported range ### Deployment Platforms - **Ubuntu**: LTS versions, 20.04 onwards - **Architectures**: x86_64 & aarch64 - **Raspberry Pi OS**: Bullseye and Bookworm distributions - **Architectures**: aarch64 & armv7l ### Dependencies Management - **Runtime Dependencies**: [python/requirements.txt](mdc:python/requirements.txt) - **Development Dependencies**: [python/requirements-dev.txt](mdc:python/requirements-dev.txt) - **Testing Dependencies**: [python/requirements-test.txt](mdc:python/requirements-test.txt) - **Use exact versions** for production dependencies - **Consider ARM compatibility** for Raspberry Pi deployment - **Test across Python versions** and target architectures - **Edge Devices**: Resource-constrained environments with limited CPU/memory - Consider platform-specific limitations and optimizations ### Supported Architectures - **Ubuntu**: x86_64 & aarch64 architectures - **Raspberry Pi OS**: aarch64 & armv7l architectures - **Cross-Platform Testing**: Ensure compatibility across all supported architectures - **Performance Considerations**: ARM-based architectures may have different performance characteristics ### Compatibility Guidelines - Use language features available in Python 3.8.10+ only - Test on all supported architectures: x86_64, aarch64, and armv7l - Consider performance implications on ARM-based edge devices (aarch64, armv7l) - Handle platform-specific dependencies appropriately - Validate package availability across all target architectures ## Code Style & Standards - Follow PEP 8 style guidelines strictly - Use type hints where appropriate for better code documentation (Python 3.8.10+ compatible) - Maximum line length: 120 characters - Use 4 spaces for indentation (never tabs) - Import order: standard library, third-party, local imports - Use double quotes for strings consistently - Use descriptive variable and function names ### Import Management & Circular Dependencies - 
**Avoid Circular Imports**: Prevent circular dependency issues that can cause import failures - **Local Imports**: Use local imports only as a last resort when no other option exists - **Import Testing**: Ensure local imports actually work and don't break during runtime - **Dependency Design**: Restructure code to eliminate need for circular imports when possible ### Circular Import Prevention Strategies ```python # BAD: Top-level import causing circular dependency from fledge.services.core import server # This can cause circular import # BETTER: Local import as last resort (with comment explaining why) def get_server_instance(): """Get server instance with local import to avoid circular dependency.""" # Require a local import in order to avoid circular import references from fledge.services.core import server return server # BEST: Dependency injection or refactoring to avoid the need class MyHandler: def __init__(self, server_instance=None): self._server = server_instance def process_request(self): if self._server: return self._server.get_info() # ALTERNATIVE: Use interfaces/protocols to break dependencies from typing import Protocol class ServerProtocol(Protocol): def get_info(self) -> dict: ... def shutdown(self) -> None: ... 
class MyService: def __init__(self, server: ServerProtocol): self._server = server ``` ### Local Import Guidelines When local imports are unavoidable: - **Document Reasoning**: Always comment why local import is necessary - **Test Thoroughly**: Ensure the import works in all execution contexts - **Minimal Scope**: Keep local imports as close to usage as possible - **Error Handling**: Handle potential import failures gracefully - **Consider Alternatives**: Always look for architectural solutions first ```python def handle_server_operation(): """Handle operation requiring server access.""" try: # Local import to avoid circular dependency - server imports this module from fledge.services.core import server # Verify the import worked if not hasattr(server, 'expected_method'): raise ImportError("Server module not properly initialized") return server.expected_method() except ImportError as ex: _logger.error(ex, "Failed to import server module") raise RuntimeError("Server not available") from ex ``` ### Documentation Standards - **Docstrings**: Use pydoc-compatible docstrings for all public functions, classes, and modules - **Docstring Format**: Follow PEP 257 and Google/NumPy style for consistency - **Mandatory Docstrings**: Required for: - All public functions and methods - All public classes - All modules (module-level docstring) - Complex private functions (use judgment) - **Missing Docstrings**: Complain/flag when docstrings are missing for public APIs ### Docstring Examples ```python def get_user_data(user_id: int, include_settings: bool = False) -> dict: """Retrieve user data from the database. 
Args: user_id: The unique identifier for the user include_settings: Whether to include user settings in response Returns: dict: User data with keys 'id', 'name', 'email', and optionally 'settings' Raises: ValueError: If user_id is not a positive integer DatabaseError: If database connection fails Example: >>> user_data = get_user_data(123, include_settings=True) >>> print(user_data['name']) 'John Doe' """ pass class DeviceManager: """Manages IoT device connections and data collection. This class handles the lifecycle of IoT devices including discovery, connection management, and data retrieval from various sensor types. Attributes: device_count: Number of currently connected devices connection_timeout: Timeout in seconds for device connections Example: >>> manager = DeviceManager() >>> manager.connect_device("sensor_01") >>> data = manager.get_device_data("sensor_01") """ pass ``` ### Docstring Quality Standards - **Clear and Concise**: Describe what the function/class does, not how - **Parameter Documentation**: Document all parameters with types and descriptions - **Return Value Documentation**: Describe return types and structure - **Exception Documentation**: List all exceptions that may be raised - **Usage Examples**: Include practical examples for complex functions - **pydoc Compatibility**: Ensure docstrings render correctly with `python -m pydoc module_name` ### Naming Conventions - **Variables**: Use snake_case for all variable names ```python user_id = 123 device_name = "sensor_01" connection_timeout = 30 ``` - **Functions/Methods**: Use snake_case for all function and method names ```python def get_user_data(): pass def process_sensor_reading(reading_value): pass ``` - **Classes**: Use PascalCase for class names (following PEP 8) ```python class DeviceManager: pass class SensorDataProcessor: pass ``` - **Constants**: Use UPPER_SNAKE_CASE for constants ```python MAX_RETRY_COUNT = 3 DEFAULT_TIMEOUT_SECONDS = 30 API_BASE_URL = "https://api.example.com" ``` 
### API Response Naming - **API Response Keys**: Always use camelCase for JSON response keys ```python # Correct API response format response = { "userId": 123, "deviceName": "sensor_01", "lastReadingTime": "2024-01-01T12:00:00Z", "sensorData": { "temperature": 25.5, "humidity": 60.2 } } ``` - **Internal Code**: Use snake_case everywhere else (variables, function names, etc.) - **Database Fields**: Use snake_case for database column names and internal data structures ## File Structure & Organization - Python code lives in [python/fledge/](mdc:python/fledge/) directory - Follow the existing module structure: - [python/fledge/common/](mdc:python/fledge/common/) - shared utilities and common functionality - [python/fledge/services/](mdc:python/fledge/services/) - core services implementation - [python/fledge/tasks/](mdc:python/fledge/tasks/) - background tasks and scheduled operations - [python/fledge/plugins/](mdc:python/fledge/plugins/) - plugin interfaces and base classes ### Core Microservice Architecture - **`server.py`**: The heart of the core microservice - central orchestration and service management - **Core Service Location**: [python/fledge/services/core/server.py](mdc:python/fledge/services/core/server.py) - **Critical Component**: Handles microservice lifecycle, service discovery, and core coordination - **Service Entry Point**: Primary entry point for the core Fledge service - **Integration Hub**: Coordinates between all other services, plugins, and components ================================================ FILE: .cursor/rules/python/quality.mdc ================================================ --- description: "Python quality rules for Fledge - dependencies, logging, performance, and version compatibility" globs: "python/requirements*.txt" alwaysApply: false author: "Ashish Jabble" --- # Python Quality & Best Practices Rules ## Dependencies & Requirements Management ### Requirements Files Structure - 
**[python/requirements.txt](mdc:python/requirements.txt)** - Runtime dependencies for production - **[python/requirements-dev.txt](mdc:python/requirements-dev.txt)** - Development dependencies (includes runtime + test) - **[python/requirements-test.txt](mdc:python/requirements-test.txt)** - Testing framework dependencies ### Version-Specific Dependencies - Use Python version markers for compatibility: `package==version;python_version>="3.12"` - Example patterns from existing requirements: ``` aiohttp==3.8.6;python_version<"3.12" aiohttp==3.10.11;python_version>="3.12" yarl==1.7.2;python_version<="3.10" yarl==1.9.4;python_version>="3.11" and python_version<"3.12" ``` ### Dependency Guidelines - **Always specify exact versions** for production dependencies - **Use version markers** when different Python versions require different package versions - **Test across Python versions** to ensure compatibility - **Keep requirements files in sync** - test dependencies should match runtime versions - **Document reasoning** for version-specific constraints in comments - **Consider ARM compatibility** for Raspberry Pi deployment ### Requirements File Documentation - **Add comments** explaining version constraints and Python version markers - **Document platform-specific requirements** (e.g., ARM vs x86_64) - **Explain version ranges** when multiple versions are supported for different Python versions - **Include installation notes** for complex dependencies - **Reference upstream issues** when using specific versions due to bugs or compatibility ## Logging ### Logging Framework - **Use FLCoreLogger class** for all logging operations - **Multi-stacktrace support**: FLCoreLogger handles complex stacktrace scenarios - Import from fledge.common.logger: `from fledge.common.logger import FLCoreLogger` - Create logger instance: `_logger = FLCoreLogger().get_logger(__name__)` ### Logging Best Practices - Use the Fledge logging framework consistently across all components - Include 
appropriate context in log messages for debugging - Use proper log levels (DEBUG, INFO, WARNING, ERROR) - Avoid logging sensitive information (passwords, tokens, API keys, etc.) - Structure log messages for easy parsing and analysis ### FLCoreLogger Usage Examples ```python from fledge.common.logger import FLCoreLogger _logger = FLCoreLogger().get_logger(__name__) # Standard logging _logger.info("Service started successfully") _logger.warning("Configuration parameter missing, using default") _logger.error("Failed to connect to database") # Multi-stacktrace scenarios (handled automatically by FLCoreLogger) try: # Complex operation that might have nested exceptions result = complex_operation() except Exception as ex: _logger.error(ex, "Complex operation failed") ``` ### Log Message Guidelines - Use descriptive messages that provide context - Include relevant data (IDs, names, values) but avoid sensitive information - Use consistent formatting for similar log types - Consider log aggregation and searching when structuring messages ## Testing Integration - **Unit Testing**: Detailed unit testing guidelines are in [python/unit.mdc](mdc:python/unit.mdc) - **System Testing**: Located in [tests/system/python/](mdc:tests/system/python/) - **Test Documentation**: Follow [tests/README.rst](mdc:tests/README.rst) for complete testing instructions ## Performance Guidelines ### Edge Computing Optimization - **Memory Constraints**: Raspberry Pi and edge devices have limited RAM - **CPU Limitations**: ARM processors may be slower than x86_64 - **Storage Constraints**: Limited disk space on edge devices - **Network Considerations**: Potentially unreliable or slow connections ### Performance Best Practices - Optimize database queries and use appropriate indexes - Use appropriate caching strategies (consider memory limits) - Monitor resource usage in production environments - Profile performance-critical code paths on target platforms - Minimize blocking operations in async code - 
**Test on Raspberry Pi** for realistic performance validation - Consider memory-efficient data structures and algorithms - Implement graceful degradation under resource pressure ## Version Compatibility & Testing ### Python Version Testing - **Test across full range**: Python 3.8.10 through 3.12 - **Use CI/CD matrices** to validate on multiple Python versions - **Avoid deprecated features** that may be removed in newer versions - **Use version-specific workarounds** when necessary with clear documentation ### Platform Testing - **Ubuntu Testing**: Primary development and deployment platform (LTS 20.04+) - **Raspberry Pi Testing**: ARM architecture and resource constraints - **Cross-Architecture**: Ensure code works on x86_64, aarch64, and armv7l - **Performance Validation**: Test on actual Raspberry Pi hardware when possible - **Architecture-Specific Testing**: Validate on all supported architectures: - x86_64 (Ubuntu) - aarch64 (Ubuntu & Raspberry Pi OS) - armv7l (Raspberry Pi OS) ### Compatibility Guidelines - Maintain backwards compatibility for APIs across supported Python versions - Use `sys.version_info` checks for version-specific code paths - Document breaking changes clearly in commit messages and documentation - Consider dependency availability across Python versions and platforms - Test package installation on both Ubuntu and Raspberry Pi OS ## Plugin Development - Implement proper plugin interfaces defined in [python/fledge/plugins/](mdc:python/fledge/plugins/) - Handle plugin lifecycle properly (start, stop, reconfigure) - Use the plugin configuration system - Follow existing plugin patterns for consistency - Consider performance implications for edge devices ================================================ FILE: .cursor/rules/tests/python/api.mdc ================================================ --- description: "Python API integration testing rules for Fledge - conftest fixtures, HTTP client patterns, and API test organization" globs: 
"tests/system/python/api/**/*.py,tests/system/python/conftest.py" alwaysApply: false author: "Ashish Jabble" --- # API Integration Testing Guidelines ## Test Organization & Structure ### Directory Structure - **API Integration Tests**: Located in [tests/system/python/api/](mdc:tests/system/python/api/) - **Main conftest**: [tests/system/python/conftest.py](mdc:tests/system/python/conftest.py) - Contains shared fixtures - **pytest Configuration**: [tests/system/python/pytest.ini](mdc:tests/system/python/pytest.ini) - **Test Documentation**: Follow [tests/README.rst](mdc:tests/README.rst) for execution instructions ### Test File Conventions - **Naming**: Test files must begin with `test_` for pytest auto-discovery - **Pattern**: `test_<name>.py` (e.g., `test_authentication.py`, `test_configuration.py`) - **Location**: All API tests in `tests/system/python/api/` directory - **Imports**: Always include `import http.client` for HTTP connections - **Documentation**: Include module docstrings describing the API area being tested ### Test Class Organization - **Class Naming**: Use `TestClassName` pattern (e.g., `TestAuthenticationAPI`, `TestCommon`) - **Method Naming**: Use descriptive names like `test_login_username_regular_user` - **Test Flow**: Organize tests to follow logical API workflows - **Dependencies**: Use fixtures to manage test prerequisites and cleanup ## HTTP Client Standards ### Required HTTP Library - **MUST USE**: `http.client` library only for HTTP/HTTPS connections - **NO requests library**: Do not use `requests` - system tests use `http.client` exclusively - **Import Pattern**: Always include `import http.client` at the top of test files ### HTTP Connection Patterns #### Basic Connection Setup ```python import http.client import json def test_api_endpoint(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) conn.request("GET", "/fledge/ping") r = conn.getresponse() assert 200 == r.status r = r.read().decode() jdoc = json.loads(r) ``` #### POST 
Requests with JSON Data ```python def test_post_with_data(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) data = {"key": "value"} conn.request("POST", "/fledge/endpoint", json.dumps(data)) r = conn.getresponse() assert 200 == r.status response_data = json.loads(r.read().decode()) ``` #### Authenticated Requests ```python def test_authenticated_request(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) headers = {"authorization": TOKEN} conn.request("GET", "/fledge/protected", headers=headers) r = conn.getresponse() assert 200 == r.status ``` ### HTTP Best Practices - **Status Code Validation**: Always assert expected HTTP status codes - **Response Decoding**: Use `r.read().decode()` to get response text - **JSON Handling**: Use `json.loads()` and `json.dumps()` for JSON data - **Connection Reuse**: Create new connections per test method for isolation - **Error Handling**: Test both success and error response scenarios ## Core Conftest Fixtures ### Essential Fixtures for API Testing #### `reset_and_start_fledge` Primary fixture for test environment setup: ```python def test_api_method(self, reset_and_start_fledge, fledge_url): # Test runs with fresh Fledge instance ``` - **Purpose**: Kills Fledge, resets database, and starts fresh instance - **Parameters**: Uses `storage_plugin`, `readings_plugin`, `authentication` fixtures - **Usage**: Include as first parameter in test methods that need clean environment #### `fledge_url` Provides Fledge server connection details: ```python def test_connection(self, fledge_url): conn = http.client.HTTPConnection(fledge_url) ``` - **Default**: "localhost:8081" - **Override**: Use `--fledge-url` command line option - **Usage**: Required for all HTTP connections to Fledge #### `storage_plugin` Specifies database plugin for tests: ```python @pytest.fixture def storage_plugin(request): return request.config.getoption("--storage-plugin") ``` - **Default**: "sqlite" - **Options**: "sqlite", "postgres", 
"sqlitelb" - **Usage**: Used by `reset_and_start_fledge` fixture #### `authentication` Defines authentication mode: ```python @pytest.fixture def authentication(): return "optional" # or "mandatory" ``` - **Default**: "optional" - **Override**: Define in individual test files for specific auth requirements - **Usage**: Controls Fledge authentication configuration ### Additional Service Management Fixtures #### `add_south` Adds and configures south services: ```python def test_with_south_service(self, add_south, fledge_url): south_service = add_south("sinusoid", None, fledge_url, "test_service") ``` #### `add_north` Adds and configures north services/tasks: ```python def test_with_north_task(self, add_north, fledge_url): north_task = add_north(fledge_url, "http_north", None, "make", "test_task") ``` #### `add_service` Adds generic services: ```python def test_with_service(self, add_service, fledge_url): service = add_service(fledge_url, "notification", None, 3, "make", "test_svc") ``` ### Utility Fixtures #### `wait_time` and `retries` Control test timing and retry behavior: ```python def test_with_timing(self, fledge_url, wait_time, retries): time.sleep(wait_time) # Default: 5 seconds # Retry logic using retries count (default: 3) ``` #### `remove_data_file` and `remove_directories` Cleanup utilities for test data: ```python def test_with_cleanup(self, remove_data_file, remove_directories): # Test creates files/directories remove_data_file("/path/to/test/file") remove_directories("/path/to/test/dir") ``` ## Test Configuration ### pytest Configuration - **File**: [tests/system/python/pytest.ini](mdc:tests/system/python/pytest.ini) - **Default Options**: `--wait-time=6 --retries=4` - **Command Line Options**: Extensive options for test customization ### Common Command Line Options ```bash # Basic test execution pytest tests/system/python/api/test_authentication.py # With custom Fledge URL pytest --fledge-url=192.168.1.100:8081 tests/system/python/api/ # With 
different storage plugin pytest --storage-plugin=postgres tests/system/python/api/ # With custom timing pytest --wait-time=10 --retries=5 tests/system/python/api/ ``` ### Available Command Line Arguments - `--storage-plugin`: Database plugin ("sqlite", "postgres", "sqlitelb") - `--readings-plugin`: Readings plugin ("Use main plugin", "sqlitememory", etc.) - `--fledge-url`: Fledge server URL (default: "localhost:8081") - `--wait-time`: Generic wait time between processes (default: 5) - `--retries`: Number of retry attempts (default: 3) - `--south-branch`, `--north-branch`: Plugin branch names for installation - `--use-pip-cache`: Use pip cache for plugin installations ## API Testing Best Practices ### Test Design Principles - **Environment Isolation**: Use `reset_and_start_fledge` for clean test environments - **Fixture Dependencies**: Properly order fixtures based on dependencies - **Response Validation**: Validate both status codes and response content - **Error Scenarios**: Test both success and failure paths - **Authentication**: Handle both authenticated and unauthenticated scenarios ### Common Testing Patterns #### Basic API Endpoint Test ```python class TestEndpoint: def test_get_endpoint(self, reset_and_start_fledge, fledge_url): conn = http.client.HTTPConnection(fledge_url) conn.request("GET", "/fledge/endpoint") r = conn.getresponse() assert 200 == r.status jdoc = json.loads(r.read().decode()) assert "expected_key" in jdoc ``` #### Authentication Flow Test ```python class TestAuthentication: def test_login_flow(self, fledge_url, authentication, reset_and_start_fledge): # Login conn = http.client.HTTPConnection(fledge_url) conn.request("POST", "/fledge/login", json.dumps({"username": "user", "password": "password"})) r = conn.getresponse() assert 200 == r.status jdoc = json.loads(r.read().decode()) token = jdoc["token"] # Use token for authenticated request conn.request("GET", "/fledge/protected", headers={"authorization": token}) r = conn.getresponse() 
assert 200 == r.status ``` #### Service Configuration Test ```python class TestServiceConfiguration: def test_service_creation(self, add_south, fledge_url): service_name = "test_service" config = {"key": "value"} service = add_south("plugin_name", None, fledge_url, service_name, config) assert service["name"] == service_name ``` ### Error Testing Guidelines - **Invalid Requests**: Test malformed JSON, missing parameters - **Authentication Errors**: Test unauthorized access, invalid tokens - **Resource Not Found**: Test non-existent endpoints and resources - **Constraint Violations**: Test duplicate names, invalid configurations - **Server Errors**: Test scenarios that trigger 500-level responses ### Data Validation Patterns - **JSON Structure**: Validate response JSON contains expected keys - **Data Types**: Verify correct data types in responses - **Value Ranges**: Check that numeric values are within expected ranges - **String Formats**: Validate UUIDs, timestamps, and formatted strings - **Array Contents**: Check array lengths and element structures ## Integration with System Testing ### Plugin Installation Testing - Use fixtures like `add_south`, `add_north` for plugin integration tests - Test both `make` and `package` installation types - Validate plugin discovery and configuration ### Multi-Service Testing - Combine multiple fixtures to test service interactions - Test data flow between south and north services - Validate configuration propagation across services ### Performance and Timing - Use `wait_time` and `retries` for timing-sensitive tests - Account for Fledge startup and shutdown times - Test timeout scenarios and retry mechanisms ## Test Execution Guidelines ### Local Development ```bash # Run all API tests cd tests/system/python pytest api/ # Run specific test file pytest api/test_authentication.py # Run with output capturing disabled pytest -s api/test_authentication.py # Run with verbose output pytest -v api/test_configuration.py # Run with very 
verbose output pytest -vv api/test_configuration.py # Run with custom Fledge instance pytest --fledge-url=localhost:8082 api/ ``` ### Continuous Integration - Use matrix testing for different storage plugins - Test across multiple Python versions - Validate on different deployment architectures - Include both authenticated and unauthenticated test runs ================================================ FILE: .cursor/rules/tests/python/unit.mdc ================================================ --- description: "Python unit testing rules for Fledge - test organization, framework, execution, and coverage" globs: "tests/unit/python/**/*.py,**/test_*.py,python/requirements-test.txt" alwaysApply: false author: "Ashish Jabble" --- # Python Unit Testing Guidelines ## Test Organization & Structure ### Directory Structure - **Unit Tests**: Located in [tests/unit/python/](mdc:tests/unit/python/) - **Test Instructions**: Follow detailed guidelines in [tests/README.rst](mdc:tests/README.rst) - **File Structure**: Tests should mirror the component structure under `tests/unit/python/fledge/` - **Template**: Use [tests/unit/python/__template__.py](mdc:tests/unit/python/__template__.py) as starting point ### Test File Conventions - **Naming**: Test files must begin with `test_` for pytest auto-discovery - **Pattern**: `test_<module_name>.py` - **Location**: Place tests in correct directory matching component structure - **Imports**: Follow Fledge import patterns and avoid circular dependencies - **Docstrings**: Include Pydoc-compatible docstrings for test classes and methods ### Test Class & Method Organization - Group related tests in classes using `TestClassName` pattern - Use descriptive test method names: `test_should_return_success_when_valid_input` - Organize tests logically: happy path, edge cases, error conditions - Use pytest fixtures for common setup and teardown - Keep tests focused and atomic - one assertion per test when possible ## Testing Framework & Dependencies ### Primary 
Framework - **Framework**: pytest (version specified in [python/requirements-test.txt](mdc:python/requirements-test.txt)) - **Dependencies**: All testing dependencies are managed in requirements-test.txt - **Dependency Management**: Reference requirements-test.txt for current versions - do not hardcode versions in documentation ### Core Testing Dependencies Key testing packages (see [python/requirements-test.txt](mdc:python/requirements-test.txt) for current versions): - `pytest` - Main testing framework - `pytest-asyncio` - For async testing support - `pytest-mock` - Mocking framework integration - `pytest-cov` - Code coverage reporting - `pytest-aiohttp` - aiohttp testing utilities - `pylint` - Code quality and linting ### Additional Testing Dependencies - `requests` - For HTTP client testing - `pyserial` - For RTU serial testing - `pytz` - Timezone handling in tests - `aiohttp` and `yarl` - Keep versions synchronized with main requirements ## Test Configuration ### pytest Configuration - **Configuration File**: [tests/unit/python/.pytest.ini](mdc:tests/unit/python/.pytest.ini) - **Minimum Version**: Check requirements-test.txt for current pytest version - **Excluded Directories**: Plugin directories excluded from test recursion - **Test Discovery**: Automatic discovery of test_*.py files ### Coverage Configuration - **Configuration File**: [tests/unit/python/.coveragerc](mdc:tests/unit/python/.coveragerc) - **Omitted Files**: - `__init__.py` and `__template__.py` files - Setup files and plugin directories - Test directories themselves - **Coverage Scope**: Focus on core Fledge components, exclude plugin frameworks ## Test Execution ### Basic pytest Commands Refer to [tests/README.rst](mdc:tests/README.rst) for complete instructions: ```bash # Execute all tests in specific file pytest test_filename.py # Execute specific test class pytest test_filename.py::TestClass # Execute specific test method pytest test_filename.py::TestClass::test_case # Verbose output with 
detailed information pytest -s -vv # Run tests with coverage pytest --cov=. --cov-report=html ``` ### Advanced Test Execution ```bash # Run tests with full coverage report pytest -s -vv tests/unit/python/fledge/ --cov=. --cov-report=html --cov-config tests/unit/python/.coveragerc # Run tests with XML coverage for CI/CD pytest --cov=. --cov-report html:coverage_html --cov-report xml:coverage.xml # Run specific test patterns pytest -k "test_pattern_name" ``` ## Code Coverage ### Coverage Configuration - **Tool**: pytest-cov framework integration - **Config File**: [tests/unit/python/.coveragerc](mdc:tests/unit/python/.coveragerc) - **Output Formats**: HTML, XML, and terminal reports - **Exclusions**: Configured to omit template files, plugins, and test directories ### Coverage Commands #### Basic Coverage Reports ```bash # Terminal coverage report (default) pytest --cov=. --cov-report=term # Terminal with missing lines shown pytest --cov=. --cov-report=term-missing # HTML coverage report (recommended for development) pytest --cov=. --cov-report=html # JSON coverage report for tools integration pytest --cov=. --cov-report=json # XML coverage report for CI/CD systems pytest --cov=. --cov-report=xml ``` #### Comprehensive Coverage Commands ```bash # Full coverage with HTML and XML (for CI/CD) pytest --cov=. --cov-report=html:coverage_html --cov-report=xml:coverage.xml --cov-config=tests/unit/python/.coveragerc # Coverage with specific source directory and custom config pytest tests/unit/python/fledge/ --cov=fledge --cov-report=html --cov-config=tests/unit/python/.coveragerc # Coverage with minimum percentage threshold (fail if below) pytest --cov=. --cov-report=term --cov-fail-under=80 # Coverage with detailed terminal output and HTML pytest --cov=. 
--cov-report=term-missing --cov-report=html:htmlcov # Coverage for specific modules only pytest --cov=fledge.services.core --cov=fledge.common --cov-report=html ``` #### Coverage Report Analysis ```bash # Generate coverage report after test run coverage report # Generate detailed HTML report coverage html # Show missing lines for specific file coverage report --show-missing # Coverage report with branch coverage pytest --cov=. --cov-branch --cov-report=html ``` ### Coverage Best Practices #### Coverage Targets & Thresholds - **Meaningful Coverage**: Aim for meaningful test coverage, not just high numbers - **Minimum Thresholds**: Set reasonable minimum coverage (e.g., 80% for core modules) - **Critical Paths**: Require higher coverage (90%+) for business logic and critical code paths - **New Code**: Ensure new code has high test coverage before merging - **Branch Coverage**: Include branch coverage for conditional logic testing #### Coverage Configuration - **Exclude Appropriately**: Use .coveragerc to exclude boilerplate and framework code - **Include Patterns**: Focus coverage on source code, exclude tests and third-party code - **Source Directories**: Specify source directories to avoid including test files in coverage - **Precision**: Set appropriate precision for coverage reporting (e.g., 1 decimal place) #### Coverage Monitoring & Reporting - **Regular Monitoring**: Track coverage trends over time in CI/CD - **Coverage Reports**: Generate reports for code review processes - **Failed Builds**: Fail builds if coverage drops below threshold - **Coverage Badges**: Display coverage status in repository README - **Trend Analysis**: Monitor coverage changes across commits and releases #### Coverage Quality Guidelines - **Test Quality Over Quantity**: High coverage with poor tests is worse than lower coverage with good tests - **Uncovered Code Review**: Regularly review uncovered code to determine if tests are needed - **Coverage Gaps**: Identify and address 
significant coverage gaps in critical modules - **Integration vs Unit**: Distinguish between unit test coverage and integration test coverage - **Documentation**: Document rationale for excluding files from coverage #### Coverage Anti-Patterns to Avoid - **Coverage Gaming**: Writing tests just to increase coverage percentage - **Shallow Testing**: Tests that call code but don't verify behavior - **Ignoring Branches**: Only testing happy paths without error conditions - **Over-Mocking**: Mocking so extensively that tests don't verify real behavior - **Coverage-Only Metrics**: Using coverage as the only quality metric #### Coverage Integration Examples ##### CI/CD Pipeline Integration ```bash # In GitHub Actions, GitLab CI, etc. pytest --cov=fledge --cov-report=xml --cov-report=html --cov-fail-under=80 ``` ##### Coverage with Multiple Output Formats ```bash # Generate multiple report formats simultaneously pytest --cov=. \ --cov-report=term-missing \ --cov-report=html:htmlcov \ --cov-report=xml:coverage.xml \ --cov-report=json:coverage.json \ --cov-fail-under=80 ``` ##### Coverage Configuration in pytest.ini ```ini [tool:pytest] addopts = --cov=fledge --cov-report=term-missing --cov-report=html --cov-fail-under=80 ``` ##### Coverage Badge Generation ```bash # Generate coverage badge (requires coverage-badge package) coverage-badge -o coverage.svg ``` ## Unit Testing Best Practices ### Test Design Principles - **Isolation**: Each test should be independent and not rely on other tests - **Repeatability**: Tests should produce consistent results across runs - **Fast Execution**: Keep unit tests fast for quick feedback loops (performance is not the focus, but speed aids development) - **Clear Assertions**: Use descriptive assertion messages - **Focused Scope**: Test one behavior per test method - **Deterministic**: Tests should not rely on random data or external timing - **Self-Contained**: Tests should set up their own data and clean up afterwards ### Mocking & 
Fixtures - **External Dependencies**: Mock all external dependencies (databases, APIs, file system) - **pytest-mock**: Use pytest-mock for integration with pytest fixtures - **Fixture Scope**: Use appropriate fixture scopes (function, class, module, session) - **Test Data**: Create reusable test data through fixtures - **Cleanup**: Ensure proper cleanup of resources and mocks ### Async Testing - **pytest-asyncio**: Use pytest-asyncio for testing async functions - **Event Loops**: Properly handle event loop lifecycle in tests - **Async Fixtures**: Use async fixtures for async setup/teardown - **Timeout Handling**: Set appropriate timeouts for async operations - **Mock Async**: Properly mock async functions and coroutines ### Error Testing - **Exception Testing**: Test both success and failure scenarios - **Error Messages**: Verify error messages and types - **Edge Cases**: Test boundary conditions and edge cases - **Input Validation**: Test invalid inputs and malformed data - **Resource Exhaustion**: Test behavior under resource constraints ## Platform & Version Testing ### Python Version Compatibility - **Target Versions**: Test across Python 3.8.10 through 3.12 - **Version-Specific**: Use version markers in requirements-test.txt for compatibility - **CI/CD Integration**: Use matrices to validate multiple Python versions - **Version Checks**: Use `sys.version_info` for version-specific test behavior ### Platform Testing Guidelines - **Ubuntu Testing**: Primary development platform (LTS 20.04+) - **Raspberry Pi**: ARM architecture testing for deployment compatibility - **Cross-Architecture**: Validate functionality on x86_64, aarch64, and armv7l - **Dependency Availability**: Ensure test dependencies install correctly across platforms - **Functional Validation**: Focus on correctness, not performance characteristics ### Architecture-Specific Testing - **x86_64**: Standard Ubuntu development and production - **aarch64**: Ubuntu ARM64 and Raspberry Pi OS 64-bit - 
**armv7l**: Raspberry Pi OS 32-bit - **Dependencies**: Ensure test dependencies are available across platforms - **Compatibility**: Verify unit tests pass consistently across architectures - **Environment Differences**: Account for platform-specific behaviors in mocks and fixtures ================================================ FILE: .cursor/services/notification.mdc ================================================ # Fledge Notification Service - Feature Development Rules (MDC Format) --- metadata: version: "1.0.0" last_updated: "2024-01-01" author: "Fledge Development Team" service: "notification-service" language: "cpp" framework: "custom" license: "MIT" --- ## Configuration ### Development Environment ```yaml development: language: "cpp" standard: "c++11" compiler: "gcc-11" build_system: "cmake" testing_framework: "gtest" linting: "clang-tidy" formatting: "clang-format" documentation: "doxygen" ``` ### Project Structure ```yaml project_structure: root: "fledge-service-notification" directories: src: - core/ # Core business logic - api/ # API endpoints and handlers - storage/ # Data persistence layer - utils/ # Utility functions - tests/ # Unit and integration tests include: # Header files docs: # Documentation scripts: # Build and deployment scripts config: # Configuration files ``` ### Naming Conventions ```yaml naming_conventions: classes: "PascalCase" methods: "camelCase" member_variables: "m_camelCase" constants: "UPPER_SNAKE_CASE" namespaces: "lowercase" files: "snake_case" examples: classes: ["NotificationManager", "EmailService"] methods: ["sendNotification", "validateRecipient"] member_variables: ["m_notificationQueue", "m_config"] constants: ["MAX_RETRY_ATTEMPTS", "DEFAULT_TIMEOUT"] ``` ## Development Rules ### Pre-Development Checklist ```yaml pre_development_checklist: architecture: - "Review existing architecture and module boundaries" - "Identify affected components and dependencies" - "Plan integration points with existing services" requirements: 
- "Define clear acceptance criteria" - "Consider backward compatibility requirements" - "Plan error handling and edge cases" observability: - "Design logging and observability strategy" - "Plan metrics collection" - "Define monitoring alerts" ``` ### Code Quality Standards ```yaml code_quality: complexity: max_function_lines: 50 max_nesting_levels: 3 max_cyclomatic_complexity: 10 memory_management: required: "smart_pointers" thread_safety: required: "mutex_protection" patterns: ["lock_guard", "unique_lock", "atomic_operations"] error_handling: required: "exception_based" forbidden: ["silent_failures", "error_ignoring"] patterns: ["try_catch", "custom_exceptions", "error_logging"] ``` ### C++ Development Standards ```yaml cpp_standards: memory_management: preferred: - "std::unique_ptr<Notification> notification(new Notification())" - "automatic cleanup at scope exit (RAII) instead of manual delete" error_handling: preferred: - "class NotificationException : public std::runtime_error" - "throw NotificationException(\"Invalid recipient: \" + recipient)" patterns: - "exception_based" - "meaningful_error_messages" - "proper_logging" thread_safety: required: - "mutex_protection_for_shared_resources" - "atomic_operations_where_appropriate" patterns: - "std::lock_guard<std::mutex> lock(m_mutex)" - "std::atomic<int> counter" ``` ### API Design Standards ```yaml api_design: restful_endpoints: base_path: "/api/v1" patterns: - "GET /notifications" - "POST /notifications" - "GET /notifications/{id}" - "PUT /notifications/{id}" - "DELETE /notifications/{id}" request_models: required_fields: - "recipient" - "subject" - "message" - "type" optional_fields: - "templateId" - "metadata" validation: - "recipient_format" - "message_length" - "type_enumeration" response_models: standard_fields: - "id" - "status" - "createdAt" - "updatedAt" error_response: - "error_code" - "error_message" - "timestamp" ``` ### Testing Standards ```yaml testing_standards: unit_tests: required: - "success_paths" - "failure_paths" - "edge_cases" - "boundary_conditions" patterns: - 
"Arrange-Act-Assert" - "Given-When-Then" naming: "MethodName_Scenario_ExpectedResult" integration_tests: required: - "end_to_end_flows" - "api_endpoints" - "database_operations" patterns: - "TestHttpClient" - "TestDatabase" - "MockServices" test_coverage: minimum: 80 critical_paths: 100 new_features: 90 ``` ### Logging and Observability ```yaml logging_standards: levels: - "TRACE" - "DEBUG" - "INFO" - "WARN" - "ERROR" - "FATAL" structured_logging: required_fields: - "timestamp" - "level" - "service" - "operation" optional_fields: - "request_id" - "user_id" - "duration" - "metadata" sensitive_data: forbidden_in_logs: - "passwords" - "api_keys" - "personal_identifiers" - "credit_card_numbers" redaction_patterns: - "password=***" - "key=***" ``` ### Security Standards ```yaml security_standards: input_validation: required: - "recipient_format" - "message_length" - "type_enumeration" - "sql_injection_prevention" patterns: - "whitelist_validation" - "regex_validation" - "length_limits" authentication: required: - "token_validation" - "permission_checks" - "session_management" patterns: - "JWT_tokens" - "OAuth2" - "API_keys" authorization: required: - "role_based_access" - "resource_permissions" - "audit_logging" patterns: - "RBAC" - "ABAC" - "Permission_matrix" ``` ### Performance Guidelines ```yaml performance_guidelines: memory_management: preferred: - "RAII_principles" - "smart_pointers" - "move_semantics" avoid: - "unnecessary_copies" - "memory_leaks" - "fragmentation" async_processing: patterns: - "thread_pools" - "async_await" - "future_promise" use_cases: - "notification_sending" - "batch_processing" - "external_api_calls" caching: strategies: - "in_memory_cache" - "distributed_cache" - "cache_invalidation" patterns: - "LRU_cache" - "TTL_expiration" - "cache_warming" ``` ### Configuration Management ```yaml configuration_management: structure: required_sections: - "database" - "email" - "logging" - "security" optional_sections: - "caching" - "monitoring" - 
"external_services" validation: required: - "type_safety" - "value_ranges" - "required_fields" patterns: - "schema_validation" - "environment_validation" - "dependency_validation" environment_specific: development: - "debug_logging" - "mock_services" - "local_database" production: - "error_logging_only" - "real_services" - "clustered_database" ``` ### Documentation Standards ```yaml documentation_standards: code_documentation: required: - "public_apis" - "complex_algorithms" - "business_logic" format: "doxygen" tags: - "@brief" - "@param" - "@return" - "@throws" - "@example" api_documentation: required: - "endpoint_descriptions" - "request_response_examples" - "error_codes" format: "OpenAPI_3.0" deployment_documentation: required: - "build_instructions" - "deployment_steps" - "configuration_guide" - "troubleshooting" ``` ### Deployment and DevOps ```yaml deployment_standards: build_system: tool: "cmake" minimum_version: "3.16" cpp_standard: "17" dependencies: - "gtest" - "spdlog" - "nlohmann_json" containerization: base_image: "gcc:11" runtime_image: "debian:bullseye-slim" multi_stage: true security_scanning: true ci_cd: required_stages: - "build" - "test" - "lint" - "security_scan" - "deploy" quality_gates: - "test_coverage >= 80%" - "no_critical_vulnerabilities" - "build_success" ``` ## Code Review Checklist ### Architecture & Design ```yaml architecture_checklist: - "Code follows established architectural patterns" - "No unnecessary coupling between modules" - "Clear separation of concerns" - "Proper use of inheritance vs composition" - "Module boundaries respected" ``` ### Code Quality ```yaml code_quality_checklist: - "All functions are under 50 lines" - "No more than 3 levels of nesting" - "No code duplication (DRY principle)" - "Meaningful variable and function names" - "Consistent coding style" - "No magic numbers without constants" ``` ### Testing ```yaml testing_checklist: - "Unit tests cover all new functionality" - "Integration tests for API endpoints" 
- "Edge cases and error conditions tested" - "Test names clearly describe behavior" - "Mock objects used appropriately" - "Test coverage meets minimum requirements" ``` ### Security ```yaml security_checklist: - "Input validation implemented" - "Authentication/authorization checks" - "No sensitive data in logs" - "Secure error handling" - "No SQL injection vulnerabilities" - "Proper secrets management" ``` ### Performance ```yaml performance_checklist: - "No N+1 query patterns" - "Efficient algorithms used" - "Memory management follows RAII" - "Async operations where appropriate" - "No blocking operations in hot paths" - "Resource cleanup implemented" ``` ### Documentation ```yaml documentation_checklist: - "Public APIs documented with Doxygen" - "README updated if needed" - "Code comments explain 'why' not 'what'" - "Configuration documented" - "Deployment instructions updated" - "API documentation current" ``` ## Anti-Patterns ### Memory Management Anti-Patterns ```yaml memory_anti_patterns: - "Missing cleanup in destructors" - "Resource leaks in error paths" - "Improper ownership semantics" ``` ### Thread Safety Anti-Patterns ```yaml thread_safety_anti_patterns: - "Shared mutable state without protection" - "Missing mutex locks" - "Race conditions in concurrent access" - "Improper atomic operation usage" - "Deadlock scenarios" ``` ### Error Handling Anti-Patterns ```yaml error_handling_anti_patterns: - "Silent failures" - "Catching all exceptions without handling" - "Incomplete error recovery" - "Insufficient error logging" - "Error swallowing" ``` ### Performance Anti-Patterns ```yaml performance_anti_patterns: - "Unnecessary object copying" - "Inefficient algorithms" - "Blocking operations in hot paths" - "Memory allocation in performance-critical code" - "N+1 query patterns" ``` ## Feature Development Template ### Template Structure ```yaml feature_template: interface_definition: - "Define the feature interface" - "Specify public API methods" - "Document 
method signatures" implementation: - "Implement the feature class" - "Add proper error handling" - "Include logging and metrics" - "Follow thread safety patterns" testing: - "Create unit tests" - "Add integration tests" - "Test edge cases" - "Verify error conditions" documentation: - "Document public APIs" - "Add usage examples" - "Update README if needed" - "Include configuration docs" ``` ### Template Code Structure ```yaml template_code: header_file: "include/[FeatureName]Service.h" implementation_file: "src/core/[FeatureName]Service.cpp" test_file: "src/tests/[FeatureName]ServiceTest.cpp" documentation_file: "docs/[FeatureName]Service.md" class_structure: - "Public interface methods" - "Private helper methods" - "Member variables" - "Constructor and destructor" test_structure: - "Setup and teardown" - "Success path tests" - "Failure path tests" - "Edge case tests" ``` ## Validation Rules ### Code Validation ```yaml code_validation: static_analysis: - "clang-tidy" - "cppcheck" - "sonarqube" dynamic_analysis: - "valgrind" - "asan" - "tsan" style_checking: - "clang-format" - "cpplint" - "custom_style_rules" ``` ### Test Validation ```yaml test_validation: coverage_requirements: line_coverage: 80 branch_coverage: 70 function_coverage: 90 test_quality: - "No flaky tests" - "Fast execution" - "Clear assertions" - "Proper mocking" ``` ### Security Validation ```yaml security_validation: static_analysis: - "semgrep" - "bandit" - "custom_security_rules" dependency_checking: - "safety" - "snyk" - "vulnerability_scanning" ``` ## Compliance ### Standards Compliance ```yaml compliance: coding_standards: - "C++11 standard" - "MISRA C++ guidelines" - "Google C++ Style Guide" - "Project-specific conventions" security_standards: - "OWASP guidelines" - "CWE/SANS Top 25" - "Industry best practices" performance_standards: - "Response time requirements" - "Throughput requirements" - "Resource utilization limits" ``` ### Quality Gates ```yaml quality_gates: build: - "Successful 
compilation" - "No warnings" - "Static analysis passed" test: - "All tests passing" - "Coverage requirements met" - "No flaky tests" security: - "No critical vulnerabilities" - "Security scan passed" - "Dependency audit clean" performance: - "Performance benchmarks passed" - "Memory usage within limits" - "Response time requirements met" ``` --- # End of MDC Configuration description: globs: alwaysApply: false --- ================================================ FILE: .cursor/services/notification_code_review.mdc ================================================ --- description: globs: alwaysApply: true --- # Fledge Notification Service - Multi-Document Context (MDC) ### Project Overview This MDC file contains comprehensive rules, guidelines, and documentation for AI-assisted development and code review in the Fledge Notification Service project. It combines code review evaluation criteria, git diff analysis techniques, and project-specific standards. --- ## 1. Code Review Evaluation Criteria ### 1.1 Design & Architecture - Verify the change fits your system's architectural patterns - Avoid unnecessary coupling or speculative features - Enforce clear separation of concerns - Align with defined module boundaries - Check for proper inheritance vs composition decisions ### 1.2 Complexity & Maintainability - Ensure control flow remains flat - Keep cyclomatic complexity low - Abstract duplicate logic (DRY principle) - Remove dead or unreachable code - Refactor dense logic into testable helper methods - Break down complex methods into smaller, focused functions ### 1.3 Functionality & Correctness - Confirm new code paths behave correctly under valid and invalid inputs - Cover all edge cases - Maintain idempotency for retry-safe operations - Satisfy all functional requirements or user stories - Include robust error-handling semantics - Validate input parameters and configuration ### 1.4 Readability & Naming - Check that identifiers clearly convey intent - Comments should 
explain *why* (not *what*) - Code blocks should be logically ordered - No surprising side-effects hide behind deceptively simple names - Use consistent naming conventions ### 1.5 Best Practices & Patterns - Validate use of language- or framework-specific idioms - Adhere to SOLID principles - Ensure proper resource cleanup - Maintain consistent logging/tracing - Clear separation of responsibilities across layers - Use RAII and smart pointers for memory management ### 1.6 Test Coverage & Quality - Verify unit tests for both success and failure paths - Include integration tests exercising end-to-end flows - Use appropriate mocks/stubs - Include meaningful assertions (including edge-case inputs) - Test names should accurately describe behavior ### 1.7 Standardization & Style - Ensure conformance to style guides (indentation, import/order, naming conventions) - Maintain consistent project structure (folder/file placement) - Zero new linter or formatter warnings - Follow C++11 standards and project conventions ### 1.8 Documentation & Comments - Confirm public APIs or complex algorithms have clear in-code documentation - Update README, Swagger/OpenAPI, CHANGELOG, or other user-facing docs - Use Doxygen-style comments for all public APIs - Include `@brief`, `@param`, `@return`, `@throws` tags ### 1.9 Security & Compliance - Check input validation and sanitization against injection attacks - Ensure proper output encoding - Implement secure error handling - Check dependency license and vulnerability checks - Follow secrets management best practices - Enforce authZ/authN where applicable ### 1.10 Performance & Scalability - Identify N+1 query patterns or inefficient I/O - Check memory management concerns - Avoid heavy hot-path computations - Consider caching, batching, memoization, async patterns - Optimize algorithms where necessary ### 1.11 Observability & Logging - Verify that key events emit metrics or tracing spans - Use appropriate log levels - Redact sensitive data - 
Include contextual information for monitoring and debugging - Support post-mortem analysis ### 1.12 CI/CD & DevOps - Validate build pipeline integrity - Ensure automated test gating - Check artifact creation - Verify dependency declarations - Follow organizational DevOps best practices --- ## 2. C++ Specific Standards ### 2.1 Naming Conventions - **Classes**: PascalCase (e.g., `NotificationManager`) - **Methods**: camelCase (e.g., `setupFilterPipeline()`) - **Member variables**: m_camelCase (e.g., `m_filterPipeline`) - **Constants**: UPPER_SNAKE_CASE (e.g., `DEFAULT_RETRIGGER_TIME`) - **Namespaces**: lowercase (e.g., `std`) ### 2.2 Memory Management - Follow RAII principles - Avoid manual memory management where possible - Ensure proper cleanup in destructors ### 2.3 Error Handling - Use exceptions for exceptional conditions - Log errors with appropriate log levels - Provide meaningful error messages - Handle resource failures gracefully ### 2.4 Thread Safety - Use mutex for shared resource protection - Consider atomic operations where appropriate - Document thread safety guarantees - Avoid race conditions in concurrent code --- ## 3. 
Git Diff Analysis Techniques ### 3.1 Understanding Git Diff Commands ```bash # Show file names only git diff --name-only origin/develop...origin/feature-branch # Show statistics git diff --stat origin/develop...origin/feature-branch # Show detailed statistics git diff --numstat origin/develop...origin/feature-branch # Show short statistics git diff --shortstat origin/develop...origin/feature-branch # Show word-level changes git diff --word-diff origin/develop...origin/feature-branch # Show context with more lines git diff -U10 origin/develop...origin/feature-branch ``` ### 3.2 Diff Output Interpretation #### File Statistics Analysis ```bash git diff --numstat origin/develop...origin/feature-branch ``` Output format: `[insertions] [deletions] [filename]` #### Change Pattern Recognition - **High insertion count**: New functionality or major refactoring - **High deletion count**: Code cleanup or breaking changes - **Balanced changes**: Refactoring or feature updates - **Low net change**: Bug fixes or minor improvements ### 3.3 Three-Way Merge Analysis ```bash # Compare two branches git diff branch1...branch2 # Compare with common ancestor git diff branch1..branch2 ``` ### 3.4 Change Impact Assessment #### File-Level Analysis 1. **Header Files**: Interface changes, new dependencies 2. **Source Files**: Implementation changes, new functionality 3. **Test Files**: Test coverage, validation logic 4. **Configuration Files**: Settings, defaults, options #### Line-Level Analysis 1. **Additions (+)**: New code, features, methods 2. **Deletions (-)**: Removed code, cleanup, breaking changes 3. **Context**: Surrounding code for understanding changes ### 3.5 Pattern Recognition in Diffs #### Common Patterns 1. **New Includes**: `#include ` 2. **Method Signatures**: Parameter changes, return type changes 3. **Class Inheritance**: `class X : public Y` 4. **Member Variables**: `Type* m_variable;` 5. **Configuration**: JSON structures, default values #### Red Flags in Diffs 1. 
**Manual Memory Management**: `new`/`delete` without smart pointers 2. **Missing Error Handling**: No try-catch blocks 3. **Inconsistent Naming**: Mixed naming conventions 4. **Large Methods**: Methods with many lines added 5. **Missing Documentation**: New methods without comments --- ## 4. Issue Severity Levels ### 4.1 Critical - Memory leaks or resource leaks - Race conditions or thread safety issues - Security vulnerabilities - Data corruption risks - Build failures or compilation errors ### 4.2 Major - Performance issues affecting scalability - Architectural violations - Missing error handling - Incomplete functionality - Breaking changes without proper migration ### 4.3 Minor - Code style violations - Missing documentation - Inefficient algorithms - Code duplication - Minor bugs with workarounds ### 4.4 Enhancement - Missing test coverage - Performance optimizations - Code refactoring opportunities - Additional features or improvements - Better error messages or logging --- ## 5. Code Review Process ### 5.1 High-Level Summary Describe product impact and engineering approach in 2-3 sentences: - **Product impact**: What does this change deliver for users or customers? - **Engineering approach**: Key patterns, frameworks, or best practices in use ### 5.2 Fetch and Scope the Diff 1. Run `git fetch origin` to ensure latest code 2. Compute `git diff --name-only --diff-filter=M origin/develop...origin/feature-branch` 3. For each file, run `git diff --quiet origin/develop...origin/feature-branch -- ` 4. Skip files that produce no actual diff hunks ### 5.3 Evaluate Against Criteria For each truly changed file and each diffed hunk, evaluate against the 12 evaluation criteria listed above. 
### 5.4 Report Issues For each validated issue, output a nested bullet like this: - File: `:` - Issue: [One-line summary of the root problem] - Fix: [Concise suggested change or code snippet] ### 5.5 Prioritize Issues Group issues by severity in this order: - Critical - Major - Minor - Enhancement ### 5.6 Highlight Positives Include a brief bulleted list of positive findings or well-implemented patterns observed in the diff. --- ## 6. Common Issues to Watch For ### 6.1 Memory Management - Manual `new`/`delete` without smart pointers - Missing cleanup in destructors - Resource leaks in error paths - Improper ownership semantics ### 6.2 Thread Safety - Shared mutable state without protection - Missing mutex locks - Race conditions in concurrent access - Improper atomic operation usage ### 6.3 Error Handling - Missing exception handling - Incomplete error recovery - Silent failures - Insufficient error logging ### 6.4 Performance - Unnecessary object copying - Inefficient algorithms - Blocking operations in hot paths - Memory allocation in performance-critical code ### 6.5 Code Quality - Code duplication - Complex methods (>50 lines) - Deep nesting (>4 levels) - Magic numbers without constants - Inconsistent naming --- ## 7. Positive Patterns to Recognize - Proper use of RAII and smart pointers - Clear separation of concerns - Comprehensive error handling - Good logging and observability - Consistent coding style - Thorough documentation - Appropriate test coverage - Performance-conscious design - Thread-safe implementations - Backward compatibility maintenance --- ## 8. 
Advanced Git Commands for Analysis ```bash # Show only function changes git diff -p origin/develop...origin/feature-branch | grep -A 5 -B 5 "^[+-].*(" # Show only structural changes git diff --stat --summary origin/develop...origin/feature-branch # Show changes with context git diff -U5 origin/develop...origin/feature-branch # Show only additions git diff --diff-filter=A origin/develop...origin/feature-branch # Show only deletions git diff --diff-filter=D origin/develop...origin/feature-branch # Show only modifications git diff --diff-filter=M origin/develop...origin/feature-branch # Count lines by type git diff origin/develop...origin/feature-branch | grep -c "^+" git diff origin/develop...origin/feature-branch | grep -c "^-" # Find new includes git diff origin/develop...origin/feature-branch | grep "^+#include" # Find new class definitions git diff origin/develop...origin/feature-branch | grep "^+class" # Find new method definitions git diff origin/develop...origin/feature-branch | grep "^+.*(" ``` --- ## 9. Fledge-Specific Guidelines ### 9.1 Notification Service Architecture - Follow existing notification patterns and conventions - Maintain backward compatibility with existing configurations - Use proper plugin architecture for extensibility - Implement proper resource cleanup for notification instances ### 9.2 Filter Pipeline Integration - Ensure thread-safe filter pipeline operations - Implement proper error handling for filter setup - Use smart pointers for filter pipeline management - Add comprehensive logging for filter operations ### 9.3 Configuration Management - Follow Fledge configuration patterns - Implement proper category registration/unregistration - Handle configuration changes gracefully - Validate configuration parameters ### 9.4 Testing Requirements - Unit tests for all new functionality - Integration tests for filter pipeline workflows - Performance tests for large datasets - Error condition testing --- ## 10. 
AI-Assisted Development Guidelines ### 10.1 Code Generation - Follow established naming conventions - Include proper error handling - Add comprehensive documentation - Ensure thread safety where applicable ### 10.2 Code Review Assistance - Analyze git diffs systematically - Identify potential issues early - Suggest improvements and optimizations - Maintain consistency with existing codebase ### 10.3 Documentation Generation - Create clear, concise documentation - Include examples and usage patterns - Document API changes and breaking changes - Maintain up-to-date README files ### 10.4 Testing Assistance - Generate comprehensive test cases - Include edge case testing - Ensure proper test coverage - Create integration test scenarios --- ## 11. Project-Specific Rules ### 11.1 File Organization - Keep header files in `include/` directories - Organize source files logically - Maintain consistent file naming - Group related functionality together ### 11.2 Build System - Follow CMake conventions - Maintain proper dependency management - Ensure cross-platform compatibility - Include proper version information ### 11.3 Version Control - Use meaningful commit messages - Create feature branches for new development - Maintain clean git history - Follow branching strategies ### 11.4 Code Quality - Pass all linting checks - Maintain consistent formatting - Follow coding standards - Include proper comments --- This MDC file serves as a comprehensive guide for AI-assisted development and code review in the Fledge Notification Service project. It provides structured evaluation criteria, git diff analysis techniques, and project-specific guidelines to ensure high-quality code development and review processes. description: globs: alwaysApply: false --- ================================================ FILE: .github/ISSUE_TEMPLATE/bug_report.yml ================================================ name: "🐛 Bug Report" description: Create a new ticket for a bug. 
title: "🐛 [BUG] - " labels: ["bug"] assignees: Mark Riddoch body: - type: markdown attributes: value: | ### Please read, before you post! This is a **BUG REPORT for issues in the existing code**. If you have general questions, code handling problems, or ideas, please use the: - Discussion-board: https://github.com/fledge-iot/fledge/discussions - Slack-Channel: Use the fledge or fledge-help Slack Channel on https://slack.lfedge.org Verify first that your issue is not already reported on https://github.com/fledge-iot/fledge/issues --- - type: textarea id: description attributes: label: "Description" description: Please enter an explicit description of your issue placeholder: Short and explicit description of your incident... validations: required: true - type: input id: platform attributes: label: "Environment Platform" description: Please enter the environment details placeholder: Information about the system or platform (e.g., OS, version, architecture). validations: required: true - type: input id: version attributes: label: "Fledge Version" description: Please enter the version details placeholder: The specific version of fledge you are using. validations: required: true - type: dropdown id: installation-method attributes: label: "Installation" description: Fledge installation via options: - Source Code - Package based - Docker Container validations: required: true - type: textarea id: reprod attributes: label: "Steps To Reproduce" description: Please enter an explicit description of your issue value: | 1. 2. 3. 4. See error render: bash validations: required: true - type: textarea id: behavior attributes: label: "Expected Behavior" description: A clear and concise description of what you expected to happen. validations: required: true - type: textarea id: screenshot attributes: label: "Screenshots" description: If applicable, add screenshots to help explain your problem. 
validations: required: false - type: textarea id: logs attributes: label: "Logs" description: Please copy and paste any relevant log (i.e syslogs) output. This will be automatically formatted into code, so no need for backticks. render: bash validations: required: false - type: textarea id: support-bundle attributes: label: "Support bundle" description: Please share the support bundle. It would be highly appreciated, as it is essential for further troubleshooting. placeholder: Use the Fledge GUI interface to collect the support bundle. Navigate to the left menu, select the 'Support' menu item, click on 'Request New,' and then download the bundle. validations: required: true - type: markdown attributes: value: | #### Thank you for taking the time to file a bug report! Your bug request will be reviewed by the team. ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false ================================================ FILE: .github/ISSUE_TEMPLATE/doc_issue.yml ================================================ name: "📝 Report a documentation issue" description: "Is something wrong, confusing or missing in the docs?" labels: ["documentation"] assignees: Mark Riddoch body: - type: input id: version attributes: label: "Version" description: Please enter the version from https://fledge-iot.readthedocs.io placeholder: Obtain information about the version (e.g., latest, nightly) validations: required: true - type: textarea id: describe-issue attributes: label: "Describe the documentation issue" validations: required: true - type: textarea id: what-solution attributes: label: "What solution would you like to see?" validations: required: true - type: markdown attributes: value: | #### Thank you for taking the time to file a docs issue report! Your request will be reviewed by the team. 
================================================ FILE: .github/ISSUE_TEMPLATE/feature_request.yml ================================================ name: "💡 Feature Request" description: Create a new ticket for a new feature request title: "💡 [REQUEST] - <title>" labels: ["question"] assignees: Mark Riddoch body: - type: textarea id: summary attributes: label: "Summary" description: Provide a brief explanation of the feature placeholder: Describe in a few lines your feature request validations: required: true - type: textarea id: basic_example attributes: label: "Basic Example" description: Indicate here some basic examples of your feature. placeholder: A few specific words about your feature request. validations: required: true - type: textarea id: drawbacks attributes: label: "Drawbacks" description: What are the drawbacks/impacts of your feature request? placeholder: Identify the drawbacks and impacts while being neutral on your feature request validations: required: true - type: textarea id: unresolved_question attributes: label: "Unresolved questions" description: What questions still remain unresolved? placeholder: Identify any unresolved issues. validations: required: false - type: textarea id: implementation_pr attributes: label: "Implementation PR" description: Pull request used placeholder: "#Pull Request ID" validations: required: false - type: textarea id: reference_issues attributes: label: "Reference Issues" description: Common issues placeholder: "#Issues IDs" validations: required: false - type: markdown attributes: value: | #### Thank you for helping us improve the project! Your feature request will be reviewed by the team. 
================================================ FILE: .github/workflows/checker.yml ================================================ name: Checker on: push: branches: ['**'] jobs: test: name: 🛠️ Build on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ubuntu-22.04, ubuntu-24.04] env: FLEDGE_ROOT: ${{ github.workspace }} PYTHONPATH: ${{ github.workspace }}/python steps: - name: 🛎️ Checkout code uses: actions/checkout@v4 - name: ⚙️ Compile Fledge Core id: make_fledge run: | set -e echo "⚠️ APT is misinterpreting the mirror+file: scheme as a URL 🌐, causing 404 errors ❌ due to a missing or invalid /etc/apt/apt-mirrors.txt file 📄." RELEASE=$(lsb_release -cs) echo "Using release: $RELEASE" cat <<EOF | sudo tee /etc/apt/sources.list deb http://archive.ubuntu.com/ubuntu $RELEASE main restricted universe multiverse deb http://archive.ubuntu.com/ubuntu $RELEASE-updates main restricted universe multiverse deb http://archive.ubuntu.com/ubuntu $RELEASE-backports main restricted universe multiverse deb http://security.ubuntu.com/ubuntu $RELEASE-security main restricted universe multiverse EOF sudo apt-get update sudo apt-get install -y --fix-missing echo "🔧 Run setup prerequisites 📦 and compilation of code 🛠️" cd "$FLEDGE_ROOT" sudo ./requirements.sh make -j"$(nproc)" - name: 🧪 Run C Unit Tests if: steps.make_fledge.outcome == 'success' continue-on-error: true run: | set +e cd "$FLEDGE_ROOT/tests/unit/C" echo "🛠️ Installing C test dependencies..." chmod +x requirements.sh && ./requirements.sh echo "📋 Running C tests..." chmod +x scripts/RunAllTests.sh && ./scripts/RunAllTests.sh mkdir -p "$FLEDGE_ROOT/reports" cp -v results/*.xml "$FLEDGE_ROOT/reports/" || echo "⚠️ No C test reports found" - name: 🧪 Run Python Unit Tests if: steps.make_fledge.outcome == 'success' continue-on-error: true run: | set +e echo "🛠️ Installing Python test dependencies..." python3 -m pip install -Ir python/requirements-test.txt echo "📋 Running Python tests..." 
python3 -m pytest -s -vv \ --junit-xml="$FLEDGE_ROOT/tests/unit/python/fledge/python_test_output.xml" \ "$FLEDGE_ROOT/tests/unit/python/fledge" \ --tb=line mkdir -p "$FLEDGE_ROOT/reports" cp -v "$FLEDGE_ROOT/tests/unit/python/fledge/"*.xml "$FLEDGE_ROOT/reports/" || echo "⚠️ No Python test report found" # Publish test results to GitHub UI using a third-party action # Note: GitHub Actions does not yet support native test report publishing in the UI # This step uses dorny/test-reporter to visualize test results in the Actions tab - name: 📤 Publish Test Report to GitHub if: steps.make_fledge.outcome == 'success' continue-on-error: true uses: dorny/test-reporter@v1 with: name: 📊 Test Results on ${{ matrix.os }} path: ${{ env.FLEDGE_ROOT }}/reports/*.xml reporter: java-junit fail-on-error: true ================================================ FILE: .gitignore ================================================ # vi *.swp # MacOS Finder .DS_Store ._* # IDE *.idea .vscode/ # Data / cache files data/etc/storage.json data/etc/sqlite.json data/etc/sqlitelb.json data/etc/certs/* data/var data/support data/scripts data/plugins data/snapshots data/logs data/configure_repo_output.txt # SQLite3 default db location and after migration data/*.db data/*.db-wal data/*.db-shm data/*.db-journal scripts/extras/*.db /etc/storage.json /etc/certs/* storage.json # Docs docs/_build docs/__pycache__/ docs/.cache/ docs/plugins docs/services docs/fledge_plugins.rst # Compiled Object files *.pyc # build specific /C/plugins/storage/build /C/services/storage/build /cmake_build /plugins /python_build_dir /services /tasks # Error Logs *.err # Test files *.result *.temp # Keys and certificates *.cert *.csr *.key *.cer *.crt .cache/ # Backup data/backup data/etc/backup_postgres_configuration_cache.json # test .pytest_cache .coverage # Async ingest pymodule python/async_ingest.so* # Filter ingest pymodule python/filter_ingest.so* # Python south & filter plugins python/fledge/plugins/south/* 
python/fledge/plugins/filter/* python/fledge/plugins/notificationDelivery/* python/fledge/plugins/notificationRule/* # doxygen build doxy/ # aspell backups *.bak tests/unit/C/build tests/unit/C/lib tests/unit/C/*/build ================================================ FILE: .readthedocs.yaml ================================================ # Read the Docs configuration file for Sphinx projects # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details # Required version: 2 # Set the OS, Python version and other tools you might need build: os: ubuntu-20.04 tools: python: "3.8" # You can also specify other tool versions: # nodejs: "20" # rust: "1.70" # golang: "1.20" # Build documentation in the "docs/" directory with Sphinx sphinx: configuration: docs/conf.py # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs # builder: "dirhtml" # Fail on all warnings to avoid broken references # fail_on_warning: true # Optionally build your docs in additional formats such as PDF and ePub # formats: # - pdf # - epub # Optional but recommended, declare the Python requirements required # to build your documentation # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html python: install: - requirements: docs/requirements.txt ================================================ FILE: ADOPTERS.MD ================================================ # Fledge Adopters - Beckhoff - PLC Vendor - Dianomic - IIoT Software - Flir - IR/Gas Cameras - General Atomics - Predator Drone - Google - Search-ML-Cloud-TPUs - JEA - Energy/Water Company - [Motorsports.ai](http://motorsports.ai/) - Racing Digital Twins - Nexcom - Industrial Gateways - Nokia - Wireless Communications - OSIsoft - Data Infrastructure - Rovisys - Industrial SI - Transpara - HMI for Process Manufacturers - Wago - PLC Vendor - Zededa - VMs for IoT - RTE France - T&D - Nueman Aluminium ================================================ FILE: 
C/common/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.4.0) if(COMMAND cmake_policy) cmake_policy(SET CMP0003 NEW) endif(COMMAND cmake_policy) # Get the os name execute_process(COMMAND bash -c "cat /etc/os-release | grep -w ID | cut -f2 -d'='" OUTPUT_VARIABLE OS_NAME OUTPUT_STRIP_TRAILING_WHITESPACE) if( POLICY CMP0007 ) cmake_policy( SET CMP0007 NEW ) endif() project(common-lib) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(UUIDLIB -luuid) set(BOOST_COMPONENTS system thread) # Late 2017 TODO: remove the following checks and always use std::regex if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_BOOST_REGEX") endif() endif() find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) # Find python3.x dev/lib package find_package(PkgConfig REQUIRED) if(${CMAKE_VERSION} VERSION_LESS "3.12.0") pkg_check_modules(PYTHON REQUIRED python3) else() find_package(Python3 REQUIRED COMPONENTS Interpreter Development NumPy) endif() # Find source files file(GLOB SOURCES *.cpp) # Include header files include_directories(include ../services/common/include ../common/include ../thirdparty/rapidjson/include ../thirdparty/Simple-Web-Server) # Add Python 3.x header files if(${CMAKE_VERSION} VERSION_LESS "3.12.0") include_directories(${PYTHON_INCLUDE_DIRS}) else() include_directories(${Python3_INCLUDE_DIRS} ${Python3_NUMPY_INCLUDE_DIRS}) endif() if(${CMAKE_VERSION} VERSION_LESS "3.12.0") link_directories(${PYTHON_LIBRARY_DIRS}) else() link_directories(${Python3_LIBRARY_DIRS}) endif() set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) # Add Python 3.5 library if(${CMAKE_VERSION} VERSION_LESS "3.12.0") 
target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES})
else()
	target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES} Python3::NumPy)
endif()
target_link_libraries(${PROJECT_NAME} ${UUIDLIB})
target_link_libraries(${PROJECT_NAME} ${Boost_LIBRARIES})
target_link_libraries(${PROJECT_NAME} -lcrypto)
set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1)

# Install library
install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib)


================================================
FILE: C/common/JSONPath.cpp
================================================
/*
 * Fledge RapidJSON JSONPath search helper
 *
 * Copyright (c) 2020 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <JSONPath.h>
#include <logger.h>
#include <cstring>
#include <stdexcept>

using namespace std;
using namespace rapidjson;

/**
 * Constructor for the JSONPath
 *
 * Stores the path expression; parsing is deferred until findNode()
 * is first called.
 *
 * @param path	The JSON path expression to evaluate
 */
JSONPath::JSONPath(const string& path) : m_path(path)
{
	m_logger = Logger::getLogger();
}

/**
 * Destructor for the JSONPath
 *
 * Reclaim the vector of components.
 */
JSONPath::~JSONPath()
{
	for (int i = 0; i < m_parsed.size(); i++)
	{
		delete m_parsed[i];
	}
}

/**
 * Find the matching node in the JSON document
 *
 * @param root	The node to search from
 * @return the matching node. Throws an exception if there was no match
 */
Value *JSONPath::findNode(Value& root)
{
	// Lazily parse the path expression on first use
	if (m_parsed.size() == 0)
	{
		parse();
	}
	// Apply each parsed path component in turn, descending into the document
	Value *node = &root;
	for (int i = 0; i < m_parsed.size(); i++)
	{
		node = m_parsed[i]->match(node);
	}
	return node;
}

/**
 * Parse the m_path JSON path. Throws an exception if there
 * was a parse error.
* * The supported elements are * Literal object name /a * Array Index a[1] * Array with matching predicate a[name==value] */ void JSONPath::parse() { char *path, *ptr, *sp; path = strdup(m_path.c_str()); ptr = strtok_r(path, "/", &sp); while (ptr) { char *p = ptr; char *bstart = NULL, *bend = NULL, *bequal = NULL; while (*p) { if (*p == '[') { bstart = p + 1; } if (*p == ']') { bend = p - 1; } if (*p == '=' && *(p+1) == '=') { bequal = p; } p++; } if (bstart == NULL && bend == NULL && bequal == NULL) { string s(ptr); m_parsed.push_back(new LiteralPathComponent(s)); } if (bstart != NULL && bend != NULL) { if (bstart > bend) { m_logger->error("Invalid JSONPath '%s', malformed selector", path); goto done; } *(bstart - 1) = 0; string name(ptr); if (bequal == NULL) { char *eptr; long index = strtol(bstart, &eptr, 10); if (eptr != bend + 1) { m_logger->error("Invalid JSONPath '%s', expected numeric selector"); goto done; } m_parsed.push_back(new IndexPathComponent(name, index)); } else { char *property = bstart; char *value = bequal + 2; *(bend + 1) = 0; *bequal = 0; string p(property), v(value); m_parsed.push_back(new MatchPathComponent(name, p, v)); } } ptr = strtok_r(NULL, "/", &sp); } done: free(path); } /** * A match against a literal path component */ JSONPath::LiteralPathComponent::LiteralPathComponent(string& name) : m_name(name) { } /** * Return the child object of node that matchs the literal name given * * @param node The node to match * @return pointer to the matching node */ rapidjson::Value *JSONPath::LiteralPathComponent::match(rapidjson::Value *node) { if (node->IsObject() && node->HasMember(m_name.c_str())) { return &((*node)[m_name.c_str()]); } throw runtime_error("Document has no member " + m_name); } /** * A match against an array index */ JSONPath::IndexPathComponent::IndexPathComponent(string& name, int index) : m_name(name), m_index(index) { } /** * Return the object at the index position of the specified array * * @param node The node to match * 
@return pointer to the matching node */ rapidjson::Value *JSONPath::IndexPathComponent::match(rapidjson::Value *node) { if (node->IsObject() && node->HasMember(m_name.c_str())) { Value& n = (*node)[m_name.c_str()]; if (n.IsArray()) { return &n[m_index]; } } throw runtime_error("Document has no member " + m_name + " or it is not an array"); } /** * Amatch against an object that hase a particular name/value pair */ JSONPath::MatchPathComponent::MatchPathComponent(string& name, string& property, string& value) : m_name(name), m_property(property), m_value(value) { } /** * Match a node within an array or object * * @param node The node to match * @return pointer to the matching node */ rapidjson::Value *JSONPath::MatchPathComponent::match(rapidjson::Value *node) { if (node->IsObject() && node->HasMember(m_name.c_str())) { Value& n = (*node)[m_name.c_str()]; if (n.IsArray()) { for (auto& v : n.GetArray()) { if (v.IsObject()) { if (v.HasMember(m_property.c_str())) { if (v[m_property.c_str()].IsString() && m_value.compare(v[m_property.c_str()].GetString()) == 0) return &v; if (v[m_property.c_str()].IsInt()) { long val = v[m_property.c_str()].GetInt(); long tval = strtol(m_value.c_str(), NULL, 10); if (val == tval) return &v; } else if (v[m_property.c_str()].IsDouble()) { double val = v[m_property.c_str()].GetDouble(); double tval = strtod(m_value.c_str(), NULL); if (val == tval) return &v; } else if (v[m_property.c_str()].IsBool()) { bool val = v[m_property.c_str()].GetBool(); if (val && (m_value.compare("true") == 0 || m_value.compare("TRUE") == 0)) return &v; if (val == false && (m_value.compare("false") == 0 || m_value.compare("FALSE") == 0)) return &v; } } } } } } throw runtime_error(string("Document has no member ") + m_name + string(" or it does not have a ") + m_property + " property"); } ================================================ FILE: C/common/acl.cpp ================================================ /* * Fledge category management * * Copyright (c) 2022 
Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <logger.h> #include <stdexcept> #include <acl.h> #include <rapidjson/document.h> #include "rapidjson/error/error.h" #include "rapidjson/error/en.h" #include <storage_client.h> using namespace std; using namespace rapidjson; /** * ACLReason constructor: * parse input JSON for ACL change reason. * * JSON should have string attributes 'reason' and 'argument' * * @param json The JSON reason string to parse * @throws exception ACLReasonMalformed */ ACL::ACLReason::ACLReason(const string& json) { Document doc; doc.Parse(json.c_str()); if (doc.HasParseError()) { Logger::getLogger()->error("ACL Reason parse error in %s: %s at %d", json.c_str(), GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset()); throw new ACLReasonMalformed(); } if (!doc.IsObject()) { Logger::getLogger()->error("ACL Reason is not a JSON object: %sd", json.c_str()); throw new ACLReasonMalformed(); } if (doc.HasMember("reason") && doc["reason"].IsString()) { m_reason = doc["reason"].GetString(); } if (doc.HasMember("argument") && doc["argument"].IsString()) { m_argument = doc["argument"].GetString(); } } /** * ACL constructor: * parse input JSON for ACL content. 
* * JSON should have string attributes 'name' and 'service' and 'url' arrays * * @param json The JSON ACL content to parse * @throws exception ACLMalformed */ ACL::ACL(const string& json) { Document doc; doc.Parse(json.c_str()); if (doc.HasParseError()) { Logger::getLogger()->error("ACL parse error in %s: %s at %d", json.c_str(), GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset()); throw new ACLMalformed(); } Logger::getLogger()->debug("ACL content is %s", json.c_str()); if (!doc.HasMember("name")) { Logger::getLogger()->error("Missing 'name' attribute in ACL JSON data"); throw new ACLMalformed(); } if (doc.HasMember("name") && doc["name"].IsString()) { m_name = doc["name"].GetString(); } // Check for service array item if (doc.HasMember("service") && doc["service"].IsArray()) { auto &items = doc["service"]; for (auto& item : items.GetArray()) { if (!item.IsObject()) { throw new ACLMalformed(); } for (Value::ConstMemberIterator itr = item.MemberBegin(); itr != item.MemberEnd(); ++itr) { // Construct KeyValueItem object KeyValueItem i(itr->name.GetString(), itr->value.GetString()); // Add object to the vector m_service.push_back(i); } } } // Check for url array item if (doc.HasMember("url") && doc["url"].IsArray()) { auto &items = doc["url"]; for (auto& item : items.GetArray()) { if (!item.IsObject()) { throw new ACLMalformed(); } string url = item["url"].GetString(); Value &acl = item["acl"]; vector<KeyValueItem> v_acl; // Check for acl array if (acl.IsArray()) { for (auto& item : acl.GetArray()) { if (!item.IsObject()) { throw new ACLMalformed(); } for (Value::ConstMemberIterator itr = item.MemberBegin(); itr != item.MemberEnd(); ++itr) { // Construct KeyValueItem object KeyValueItem item(itr->name.GetString(), itr->value.GetString()); // Add object to the ACL vector v_acl.push_back(item); } } } // Construct UrlItem with url and ACL vector UrlItem u(url, v_acl); // Add object to the URL vector m_url.push_back(u); } } } 
================================================ FILE: C/common/aggregate.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2018 OSIsoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <aggregate.h> #include <string> #include <sstream> #include <iostream> using namespace std; /** * Return the JSON payload for a where clause */ string Aggregate::toJSON() { ostringstream json; json << "{ \"column\" : \"" << m_column << "\","; json << " \"operation\" : \"" << m_operation << "\" }"; return json.str(); } ================================================ FILE: C/common/asset_tracking.cpp ================================================ /* * Fledge asset tracking related * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora, Massimiliano Pinto */ #include <logger.h> #include <asset_tracking.h> #include <config_category.h> #include "string_utils.h" using namespace std; AssetTracker *AssetTracker::instance = 0; /** * Worker thread entry point */ static void worker(void *arg) { AssetTracker *tracker = (AssetTracker *)arg; tracker->workerThread(); } /** * Get asset tracker singleton instance for the current south service * * @return Singleton asset tracker instance */ AssetTracker *AssetTracker::getAssetTracker() { return instance; } /** * AssetTracker class constructor * * @param mgtClient Management client object for this south service * @param service Service name */ AssetTracker::AssetTracker(ManagementClient *mgtClient, string service) : m_mgtClient(mgtClient), m_service(service), m_updateInterval(MIN_ASSET_TRACKER_UPDATE) { instance = this; m_shutdown = false; m_storageClient = NULL; m_thread = new thread(worker, this); try { // Find out the name of the fledge service ConfigCategory category = mgtClient->getCategory("service"); if (category.itemExists("name")) { m_fledgeName = category.getValue("name"); } } catch 
(exception& ex) { Logger::getLogger()->error("Unable to fetch the service category, %s", ex.what()); } try { // Get a handle on the storage layer ServiceRecord storageRecord("Fledge Storage"); if (!m_mgtClient->getService(storageRecord)) { Logger::getLogger()->fatal("Unable to find storage service"); return; } Logger::getLogger()->info("Connect to storage on %s:%d", storageRecord.getAddress().c_str(), storageRecord.getPort()); m_storageClient = new StorageClient(storageRecord.getAddress(), storageRecord.getPort()); } catch (exception& ex) { Logger::getLogger()->error("Failed to create storage client", ex.what()); } } /** * Destructor for the asset tracker. We must make sure any pending * tuples are written out before the asset tracker is destroyed. */ AssetTracker::~AssetTracker() { m_shutdown = true; // Signal the worker thread to flush the queue { unique_lock<mutex> lck(m_mutex); m_cv.notify_all(); } while (m_pending.size()) { // Wait for pending queue to drain this_thread::sleep_for(chrono::milliseconds(10)); } if (m_thread) { m_thread->join(); delete m_thread; m_thread = NULL; } if (m_storageClient) { delete m_storageClient; m_storageClient = NULL; } for (auto& item : assetTrackerTuplesCache) { delete item; } assetTrackerTuplesCache.clear(); for (auto& store : storageAssetTrackerTuplesCache) { delete store.first; } storageAssetTrackerTuplesCache.clear(); } /** * Fetch all asset tracking tuples from DB and populate local cache * * Return the vector of deprecated asset names * * @param plugin Plugin name * @param event Event name */ void AssetTracker::populateAssetTrackingCache(string /*plugin*/, string /*event*/) { try { std::vector<AssetTrackingTuple*>& vec = m_mgtClient->getAssetTrackingTuples(m_service); for (AssetTrackingTuple* & rec : vec) { assetTrackerTuplesCache.emplace(rec); } delete (&vec); } catch (...) 
{ Logger::getLogger()->error("Failed to populate asset tracking tuples' cache"); return; } return; } /** * Check local cache for a given asset tracking tuple * * @param tuple Tuple to find in cache * @return Returns whether tuple is present in cache */ bool AssetTracker::checkAssetTrackingCache(AssetTrackingTuple& tuple) { AssetTrackingTuple *ptr = &tuple; std::unordered_set<AssetTrackingTuple*>::const_iterator it = assetTrackerTuplesCache.find(ptr); if (it == assetTrackerTuplesCache.end()) { return false; } else return true; } /** * Lookup tuple in the asset tracker cache * * @param tuple The tuple to lookup * @return NULL if the tuple is not in the cache or the tuple from the cache */ AssetTrackingTuple* AssetTracker::findAssetTrackingCache(AssetTrackingTuple& tuple) { AssetTrackingTuple *ptr = &tuple; std::unordered_set<AssetTrackingTuple*>::const_iterator it = assetTrackerTuplesCache.find(ptr); if (it == assetTrackerTuplesCache.end()) { return NULL; } else { return *it; } } /** * Add asset tracking tuple via microservice management API and in cache * * @param tuple New tuple to add in DB and in cache */ void AssetTracker::addAssetTrackingTuple(AssetTrackingTuple& tuple) { std::unordered_set<AssetTrackingTuple*>::const_iterator it = assetTrackerTuplesCache.find(&tuple); if (it == assetTrackerTuplesCache.end()) { AssetTrackingTuple *ptr = new AssetTrackingTuple(tuple); assetTrackerTuplesCache.emplace(ptr); queue(ptr); Logger::getLogger()->debug("addAssetTrackingTuple(): Added tuple to cache: '%s'", tuple.assetToString().c_str()); } } /** * Add asset tracking tuple via microservice management API and in cache * * @param plugin Plugin name * @param asset Asset name * @param event Event name */ void AssetTracker::addAssetTrackingTuple(string plugin, string asset, string event) { // in case of "Filter" event, 'plugin' input argument is category name, so remove service name (prefix) & '_' from it if (event == string("Filter")) { string pattern = m_service + "_"; if 
(plugin.find(pattern) != string::npos) plugin.erase(plugin.begin(), plugin.begin() + m_service.length() + 1); } asset = escape(asset); AssetTrackingTuple tuple(m_service, plugin, asset, event); addAssetTrackingTuple(tuple); } /** * Return the name of the service responsible for particular event of the named asset * * @param event The event of interest * @param asset The asset we are interested in * @return string The service name of the service that ingests the asset * @throws exception If the service could not be found */ string AssetTracker::getService(const std::string& event, const std::string& asset) { // Fetch all asset tracker records std::vector<AssetTrackingTuple*>& vec = m_mgtClient->getAssetTrackingTuples(); string foundService; for (AssetTrackingTuple* &rec : vec) { // Return first service name with given asset and event if (rec->m_assetName == asset && rec->m_eventName == event) { foundService = rec->m_serviceName; break; } } delete (&vec); // Return found service or raise an exception if (foundService != "") { return foundService; } else { Logger::getLogger()->error("No service found for asset '%s' and event '%s'", event.c_str(), asset.c_str()); throw runtime_error("Fetching service for asset not yet implemented"); } } /** * Constructor for an asset tracking tuple table */ AssetTrackingTable::AssetTrackingTable() { } /** * Destructor for asset tracking tuple table */ AssetTrackingTable::~AssetTrackingTable() { for (auto t : m_tuples) { delete t.second; } } /** * Add a tuple to an asset tracking table * * @param tuple Pointer to the asset tracking tuple to add */ void AssetTrackingTable::add(AssetTrackingTuple *tuple) { auto ret = m_tuples.insert(pair<string, AssetTrackingTuple *>(tuple->getAssetName(), tuple)); if (ret.second == false) delete tuple; // Already exists } /** * Find the named asset tuple and return a pointer to te asset * * @param name The name of the asset to lookup * @return AssetTrackingTupple* The matchign tuple or NULL */ 
AssetTrackingTuple *AssetTrackingTable::find(const string& name) { auto ret = m_tuples.find(name); if (ret != m_tuples.end()) return ret->second; return NULL; } /** * Remove an asset tracking tuple from the table */ void AssetTrackingTable::remove(const string& name) { auto ret = m_tuples.find(name); if (ret != m_tuples.end()) { m_tuples.erase(ret); delete ret->second; // Free the tuple } } /** * Queue an asset tuple for writing to the database. */ void AssetTracker::queue(TrackingTuple *tuple) { unique_lock<mutex> lck(m_mutex); m_pending.emplace(tuple); m_cv.notify_all(); } /** * Set the update interval for the asset tracker. * * @param interval The number of milliseconds between update of the asset tracker * @return bool Was the update accepted */ bool AssetTracker::tune(unsigned long interval) { unique_lock<mutex> lck(m_mutex); if (interval >= MIN_ASSET_TRACKER_UPDATE) { m_updateInterval = interval; } else { Logger::getLogger()->error("Attempt to set asset tracker update to less than minimum interval"); return false; } return true; } /** * The worker thread that will flush any pending asset tuples to * the database. */ void AssetTracker::workerThread() { unique_lock<mutex> lck(m_mutex); while (m_pending.empty() && m_shutdown == false) { m_cv.wait_for(lck, chrono::milliseconds(m_updateInterval)); processQueue(); } // Process any items left in the queue at shutdown processQueue(); } /** * Process the queue of asset tracking tuple */ void AssetTracker::processQueue() { vector<InsertValues> values; static bool warned = false; while (!m_pending.empty()) { // Get first element as TrackingTuple calss TrackingTuple *tuple = m_pending.front(); // Write the tuple - ideally we would like a bulk update here or to go direct to the // database. 
However we need the Fledge service name for that, which is now in // the member variable m_fledgeName bool warn = warned; // Call class specialised processData routine: // - 1 Insert asset tracker data via Fledge API as fallback // or // - get values for direct DB operation InsertValues iValue = tuple->processData(m_storageClient != NULL, m_mgtClient, warn, m_fledgeName); warned = warn; // Bulk DB insert when queue is empty if (iValue.size() > 0) { values.push_back(iValue); } // Remove element m_pending.pop(); } // Queue processed, bulk direct DB data insert could be done if (m_storageClient && values.size() > 0) { // Bulk DB insert int n_rows = m_storageClient->insertTable("asset_tracker", values); if (n_rows != values.size()) { Logger::getLogger()->warn("The asset tracker failed to insert all records %d of %d inserted", n_rows, values.size()); } } } /** * Fetch all storage asset tracking tuples from DB and populate local cache * * Return the vector of deprecated asset names * */ void AssetTracker::populateStorageAssetTrackingCache() { try { std::vector<StorageAssetTrackingTuple*>& vec = (std::vector<StorageAssetTrackingTuple*>&) m_mgtClient->getStorageAssetTrackingTuples(m_service); for (StorageAssetTrackingTuple* & rec : vec) { set<string> setOfDPs = getDataPointsSet(rec->m_datapoints); if (setOfDPs.size() == 0) { Logger::getLogger()->warn("%s:%d Datapoints unavailable for service %s ", __FUNCTION__, __LINE__, m_service.c_str()); } // Add item into cache storageAssetTrackerTuplesCache.emplace(rec, setOfDPs); } delete (&vec); } catch (...) 
{ Logger::getLogger()->error("%s:%d Failed to populate storage asset " \ "tracking tuples' cache", __FUNCTION__, __LINE__); return; } return; } //This function takes a string of datapoints in comma-separated format and returns //set of string datapoint values std::set<std::string> AssetTracker::getDataPointsSet(std::string strDatapoints) { std::set<std::string> tokens; stringstream st(strDatapoints); std::string temp; while(getline(st, temp, ',')) { tokens.insert(temp); } return tokens; } /** * Return Plugin Information in the Fledge configuration * * @return bool True if the plugin info could be obtained */ bool AssetTracker::getFledgeConfigInfo() { Logger::getLogger()->error("StorageAssetTracker::getPluginInfo start"); try { string url = "/fledge/category/service"; if (!m_mgtClient) { Logger::getLogger()->error("%s:%d, m_mgtClient Ptr is NULL", __FUNCTION__, __LINE__); return false; } auto res = m_mgtClient->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); Logger::getLogger()->error("%s fetching service record: %s\n", httpError?"HTTP error while":"Failed to parse result of", response.c_str()); return false; } else if (doc.HasMember("message")) { Logger::getLogger()->error("Failed to fetch /fledge/category/service %s.", doc["message"].GetString()); return false; } else { Value& serviceName = doc["name"]; if (!serviceName.IsObject()) { Logger::getLogger()->error("%s:%d, serviceName is not an object", __FUNCTION__, __LINE__); return false; } if (!serviceName.HasMember("value")) { Logger::getLogger()->error("%s:%d, serviceName has no member value", __FUNCTION__, __LINE__); return false; } Value& serviceVal = serviceName["value"]; if ( !serviceVal.IsString()) { Logger::getLogger()->error("%s:%d, serviceVal is not a string", __FUNCTION__, __LINE__); return 
false; } m_fledgeName = serviceVal.GetString(); Logger::getLogger()->error("%s:%d, m_plugin value = %s", __FUNCTION__, __LINE__, m_fledgeName.c_str()); return true; } } catch (const SimpleWeb::system_error &e) { Logger::getLogger()->error("Get service failed %s.", e.what()); return false; } return false; } /** This function takes a StorageAssetTrackingTuple pointer and searches for * it in cache, if found then returns its Deprecated status * * @param ptr StorageAssetTrackingTuple* , as key in cache (map) * @return bool Deprecation status */ bool AssetTracker::getDeprecated(StorageAssetTrackingTuple* ptr) { StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(ptr); if (it == storageAssetTrackerTuplesCache.end()) { Logger::getLogger()->debug("%s:%d :tuple not found in cache", __FUNCTION__, __LINE__); return false; } else { return (it->first)->isDeprecated(); } return false; } /** * Updates datapoints present in the arg dpSet in the cache * * @param dpSet set of datapoints string values to be updated in cache * @param ptr StorageAssetTrackingTuple* , as key in cache (map) * Retval void */ void AssetTracker::updateCache(std::set<std::string> dpSet, StorageAssetTrackingTuple* ptr) { if(ptr == nullptr) { Logger::getLogger()->error("%s:%d: StorageAssetTrackingTuple should not be NULL pointer", __FUNCTION__, __LINE__); return; } StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(ptr); // search for the record in cache , if not present, simply update cache and return if (it == storageAssetTrackerTuplesCache.end()) { Logger::getLogger()->debug("%s:%d :tuple not found in cache '%s', ptr '%p'", __FUNCTION__, __LINE__, ptr->assetToString().c_str(), ptr); // Create new tuple, add it to processing queue and to cache addStorageAssetTrackingTuple(*ptr, dpSet, true); return; } else { Logger::getLogger()->debug("%s:%d :tuple found in cache '%p', '%s': datapoints '%d'", __FUNCTION__, __LINE__, (it->first), (it->first)->assetToString().c_str(), 
(it->second).size()); // record is found in cache , compare the datapoints of the argument ptr to that present in the cache // update the cache with datapoints present in argument record but absent in cache std::set<std::string> &cacheRecord = it->second; unsigned int sizeOfCacheRecord = cacheRecord.size(); // store all the datapoints to be updated in string strDatapoints which is sent to management_client std::string strDatapoints; unsigned int count = 0; for (auto itr : cacheRecord) { strDatapoints.append(itr); strDatapoints.append(","); count++; } // check which datapoints are not present in cache record, and need to be updated // in cache and db, store them in string strDatapoints, in comma-separated format for(auto itr: dpSet) { if (cacheRecord.find(itr) == cacheRecord.end()) { strDatapoints.append(itr); strDatapoints.append(","); count++; } } // remove the last comma if (strDatapoints[strDatapoints.size()-1] == ',') { strDatapoints.pop_back(); } if (count <= sizeOfCacheRecord) { // No need to update as count of cache record is not getting increased return; } // Add current StorageAssetTrackingTuple to the process queue addStorageAssetTrackingTuple(*(it->first), dpSet); // if update of DB successful , then update the CacheRecord for(auto itr: dpSet) { if (cacheRecord.find(itr) == cacheRecord.end()) { cacheRecord.insert(itr); } } } } /** * Add asset tracking tuple via microservice management API and in cache * * @param tuple New tuple to add to the queue * @param dpSet Set of datapoints to handle * @param addObj Create a new obj for cache and queue if true. * Otherwise just add current tuple to processing queue. 
*/ void AssetTracker::addStorageAssetTrackingTuple(StorageAssetTrackingTuple& tuple, std::set<std::string>& dpSet, bool addObj) { // Create a comma separated list of datapoints std::string strDatapoints; unsigned int count = 0; for (auto itr : dpSet) { strDatapoints.append(itr); strDatapoints.append(","); count++; } if (strDatapoints[strDatapoints.size()-1] == ',') { strDatapoints.pop_back(); } if (addObj) { // Create new tuple from input one StorageAssetTrackingTuple *ptr = new StorageAssetTrackingTuple(tuple); // Add new tuple to storage asset cache storageAssetTrackerTuplesCache.emplace(ptr, dpSet); // Add datapoints and count needed for data insert ptr->m_datapoints = strDatapoints; ptr->m_maxCount = count; // Add new tuple to processing queue queue(ptr); } else { // Add datapoints and count needed for data insert tuple.m_datapoints = strDatapoints; tuple.m_maxCount = count; // Just add current tuple to processing queue queue(&tuple); } } /** * Insert AssetTrackingTuple data via Fledge core API * or prepare InsertValues object for direct DB operation * * @param storage Boolean for storage being available * @param mgtClient ManagementClient object pointer * @param warned Boolean ireference updated for logging operation * @param instanceName Fledge instance name * @return InsertValues object */ InsertValues AssetTrackingTuple::processData(bool storage, ManagementClient *mgtClient, bool &warned, string &instanceName) { InsertValues iValue; // Write the tuple - ideally we would like a bulk update here or to go direct to the // database. 
However we need the Fledge service name passed in instanceName if (!storage) { // Fall back to using interface to the core if (!warned) { Logger::getLogger()->warn("Asset tracker falling back to core API"); } warned = true; mgtClient->addAssetTrackingTuple(m_serviceName, m_pluginName, m_assetName, m_eventName); } else { iValue.push_back(InsertValue("asset", m_assetName)); iValue.push_back(InsertValue("event", m_eventName)); iValue.push_back(InsertValue("service", m_serviceName)); iValue.push_back(InsertValue("fledge", instanceName)); iValue.push_back(InsertValue("plugin", m_pluginName)); } return iValue; } /** * Insert StorageAssetTrackingTuple data via Fledge core API * or prepare InsertValues object for direct DB operation * * @param storage Boolean for storage being available * @param mgtClient ManagementClient object pointer * @param warned Boolean ireference updated for logging operation * @param instanceName Fledge instance name * @return InsertValues object */ InsertValues StorageAssetTrackingTuple::processData(bool storage, ManagementClient *mgtClient, bool &warned, string &instanceName) { InsertValues iValue; // Write the tuple - ideally we would like a bulk update here or to go direct to the // database. 
However we need the Fledge service name for that, which is now in // the member variable m_fledgeName if (!storage) { // Fall back to using interface to the core if (!warned) { Logger::getLogger()->warn("Storage Asset tracker falling back to core API"); } warned = true; // Insert tuple via Fledge core API mgtClient->addStorageAssetTrackingTuple(m_serviceName, m_pluginName, m_assetName, m_eventName, false, m_datapoints, m_maxCount); } else { iValue.push_back(InsertValue("asset", m_assetName)); iValue.push_back(InsertValue("event", m_eventName)); iValue.push_back(InsertValue("service", m_serviceName)); iValue.push_back(InsertValue("fledge", instanceName)); iValue.push_back(InsertValue("plugin", m_pluginName)); // prepare JSON datapoints string datapoints = "\""; for ( int i = 0; i < m_datapoints.size(); ++i) { if (m_datapoints[i] == ',') { datapoints.append("\",\""); } else { datapoints.append(1,m_datapoints[i]); } } datapoints.append("\""); Document doc; string jsonData = "{\"count\": " + std::to_string(m_maxCount) + ", \"datapoints\": [" + datapoints + "]}"; doc.Parse(jsonData.c_str()); iValue.push_back(InsertValue("data", doc)); } return iValue; } /** * Check if a StorageAssetTrackingTuple is in cache * * @param tuple The StorageAssetTrackingTuple to find * @return Pointer to found tuple or NULL */ StorageAssetTrackingTuple* AssetTracker::findStorageAssetTrackingCache(StorageAssetTrackingTuple& tuple) { StorageAssetCacheMapItr it = storageAssetTrackerTuplesCache.find(&tuple); if (it == storageAssetTrackerTuplesCache.end()) { return NULL; } else { return it->first; } } /** * Get stored value in the StorageAssetTrackingTuple cache for the given tuple * * @param tuple The StorageAssetTrackingTuple to find * @return Pointer to found std::set<std::string> result or NULL if tuble does not exist */ std::set<std::string>* AssetTracker::getStorageAssetTrackingCacheData(StorageAssetTrackingTuple* tuple) { StorageAssetCacheMapItr it = 
storageAssetTrackerTuplesCache.find(tuple); if (it == storageAssetTrackerTuplesCache.end()) { return NULL; } else { return &(it->second); } } ================================================ FILE: C/common/audit_logger.cpp ================================================ /* * Fledge Singleton Audit Logger interface * * Copyright (c) 2023 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <audit_logger.h> AuditLogger *AuditLogger::m_instance = 0; using namespace std; /** * Constructor for an audit logger that is passed * the management client. This must be called early in * a service or task creation before any audit logs are * created. * * @param mgmt Pointer to the management client */ AuditLogger::AuditLogger(ManagementClient *mgmt) : m_mgmt(mgmt) { m_instance = this; } /** * Destructor for an audit logger */ AuditLogger::~AuditLogger() { } /** * Get the audit logger singleton */ AuditLogger *AuditLogger::getLogger() { if (!m_instance) { Logger::getLogger()->error("An attempt has been made to obtain the audit logger before it has been created."); } return m_instance; } void AuditLogger::auditLog(const string& code, const string& level, const string& data) { if (m_instance) { m_instance->audit(code, level, data); } else { Logger::getLogger()->error("An attempt has been made to log an audit event when no audit logger is available"); Logger::getLogger()->error("Audit event is: %s, %s, %s", code.c_str(), level.c_str(), data.c_str()); } } /** * Log an audit message * * @param code The audit code * @param level The audit level * @param data Optional data associated with the audit entry */ void AuditLogger::audit(const string& code, const string& level, const string& data) { m_mgmt->addAuditEntry(code, level, data); } ================================================ FILE: C/common/base64databuffer.cpp ================================================ /* * Fledge Base64 encoded DataBuffer * * Copyright (c) 2021 Dianomic Systems 
* * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <base64databuffer.h> using namespace std; /** * Construct a DataBuffer by decoding a Base64 encoded buffer */ Base64DataBuffer::Base64DataBuffer(const string& encoded) { m_data = NULL; m_itemSize = encoded[0] - '0'; size_t in_len = encoded.size() - 1; if (in_len % 4 != 0) { throw runtime_error("Base64DataBuffer string is incorrect length"); } size_t maxLen = in_len / 4 * 3; if (encoded[in_len - 1] == '=') maxLen--; if (encoded[in_len - 2] == '=') maxLen--; m_len = maxLen / m_itemSize; if ((m_data = malloc(maxLen)) == NULL) { throw runtime_error("Base64DataBuffer insufficient memory to store data"); } uint8_t *data = (uint8_t *)m_data; for (size_t i = 0, j = 0; i < in_len;) { uint32_t a = encoded[i] == '=' ? 0 & i++ : decodingTable[static_cast<int>(encoded[i++])]; uint32_t b = encoded[i] == '=' ? 0 & i++ : decodingTable[static_cast<int>(encoded[i++])]; uint32_t c = encoded[i] == '=' ? 0 & i++ : decodingTable[static_cast<int>(encoded[i++])]; uint32_t d = encoded[i] == '=' ? 0 & i++ : decodingTable[static_cast<int>(encoded[i++])]; uint32_t triple = (a << 3 * 6) + (b << 2 * 6) + (c << 1 * 6) + (d << 0 * 6); if (j < maxLen) data[j++] = (triple >> 2 * 8) & 0xFF; if (j < maxLen) data[j++] = (triple >> 1 * 8) & 0xFF; if (j < maxLen) data[j++] = (triple >> 0 * 8) & 0xFF; } } /** * Base 64 encode the DataBuffer. 
Not the first character is * not the data itself but an unencoded value for itemSize */ string Base64DataBuffer::encode() { size_t nBytes = m_itemSize * m_len; size_t encoded = 4 * ((nBytes + 2) / 3); char *ret = (char *)malloc(encoded + 1); char *p = ret; *p++ = m_itemSize + '0'; uint8_t *data = (uint8_t *)m_data; int i; for (i = 0; i < m_len - 2; i += 3) { *p++ = encodingTable[(*data >> 2) & 0x3F]; *p++ = encodingTable[((*data & 0x3) << 4) | ((int) (*(data + 1) & 0xF0) >> 4)]; *p++ = encodingTable[((*(data + 1) & 0xF) << 2) | ((int) (*(data + 2) & 0xC0) >> 6)]; *p++ = encodingTable[*(data + 2) & 0x3F]; data += 3; } if (i < nBytes) { *p++ = encodingTable[(*data >> 2) & 0x3F]; if (i == (nBytes - 1)) { *p++ = encodingTable[((*data & 0x3) << 4)]; *p++ = '='; } else { *p++ = encodingTable[((*data & 0x3) << 4) | ((int) (*(data + 1) & 0xF0) >> 4)]; *p++ = encodingTable[((*(data + 1) & 0xF) << 2)]; } *p++ = '='; } *p = '\0'; string r = string(ret); free(ret); return r; } ================================================ FILE: C/common/base64image.cpp ================================================ /* * Fledge Base64 encoded datapoint image * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <base64dpimage.h> #include <logger.h> #include <string.h> #include <sys/time.h> using namespace std; /** * Construct a DPImage by decoding a Base64 encoded buffer */ Base64DPImage::Base64DPImage(const string& data) { sscanf(data.c_str(), "%d,%d,%d_", &m_width, &m_height, &m_depth); m_byteSize = m_width * m_height * (m_depth / 8); size_t pos = data.find_first_of("_"); string encoded; if (pos != string::npos) { encoded = data.substr(pos + 1); } size_t in_len = encoded.size(); if (in_len % 4 != 0) { throw runtime_error("Base64DataBuffer string is incorrect length"); } if ((m_pixels = malloc(m_byteSize)) == NULL) { throw runtime_error("Base64DataBuffer insufficient memory to store data"); } uint8_t *ptr = (uint8_t 
*)m_pixels; for (size_t i = 0, j = 0; i < in_len;) { uint32_t a = encoded[i] == '=' ? 0 & i++ : decodingTable[(uint8_t)(encoded[i++])]; uint32_t b = encoded[i] == '=' ? 0 & i++ : decodingTable[(uint8_t)(encoded[i++])]; uint32_t c = encoded[i] == '=' ? 0 & i++ : decodingTable[(uint8_t)(encoded[i++])]; uint32_t d = encoded[i] == '=' ? 0 & i++ : decodingTable[(uint8_t)(encoded[i++])]; uint32_t triple = (a << 3 * 6) + (b << 2 * 6) + (c << 1 * 6) + (d << 0 * 6); if (j < m_byteSize) ptr[j++] = (triple >> 2 * 8) & 0xFF; if (j < m_byteSize) ptr[j++] = (triple >> 1 * 8) & 0xFF; if (j < m_byteSize) ptr[j++] = (triple >> 0 * 8) & 0xFF; } } /** * Base 64 encode the DPImage. Note the first character is * not the data itself but an unencoded value for itemSize */ string Base64DPImage::encode() { char buf[80]; int hlen = snprintf(buf, sizeof(buf), "%d,%d,%d_", m_width, m_height, m_depth); size_t nBytes = m_byteSize; size_t encoded = 4 * ((nBytes + 2) / 3); uint8_t *ret = (uint8_t *)malloc(hlen + encoded + 1); strcpy((char *)ret, buf); register uint8_t *p = ret + hlen; register uint8_t *data = (uint8_t *)m_pixels; int i; for (i = 0; i < m_byteSize - 2; i += 3) { *p++ = encodingTable[(*data >> 2) & 0x3F]; *p++ = encodingTable[((*data & 0x3) << 4) | ((unsigned int) (*(data + 1) & 0xF0) >> 4)]; *p++ = encodingTable[((*(data + 1) & 0xF) << 2) | ((unsigned int) (*(data + 2) & 0xC0) >> 6)]; *p++ = encodingTable[*(data + 2) & 0x3F]; data += 3; } if (i < nBytes) { *p++ = encodingTable[(*data >> 2) & 0x3F]; if (i == (nBytes - 1)) { *p++ = encodingTable[((*data & 0x3) << 4)]; *p++ = '='; } else { *p++ = encodingTable[((*data & 0x3) << 4) | ((unsigned int) (*(data + 1) & 0xF0) >> 4)]; *p++ = encodingTable[((*(data + 1) & 0xF) << 2)]; } *p++ = '='; } *p = '\0'; string rstr((char *)ret); free(ret); return rstr; } ================================================ FILE: C/common/bearer_token.cpp ================================================ /* * Fledge bearer token utilities * * Copyright (c) 
2022 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include "bearer_token.h" #include <rapidjson/document.h> #include <logger.h> using namespace rapidjson; using namespace std; using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; /** * BearerToken constructor with request object * * @param request HTTP request object */ BearerToken::BearerToken(shared_ptr<HttpServer::Request> request) { string bearer_token; // Extract access bearer token from request headers for(auto &field : request->header) { if (field.first == AUTH_HEADER) { std::size_t pos = field.second.rfind(BEARER_SCHEMA); if (pos != string::npos) { pos += strlen(BEARER_SCHEMA); m_bearer_token = field.second.substr(pos); } } } m_expiration = 0; m_verified = false; } /** * BearerToken constructor with string reference * @param token Bearer token string */ BearerToken::BearerToken(std::string& token) : m_bearer_token(token) { m_expiration = 0; m_verified = false; } /** * BearerToken verification from JSON string reference * * Known token claims as stored as strings * * @param response JSON string from token verification endpoint * @return True on success * False otherwise */ bool BearerToken::verify(const string& response) { if (m_bearer_token.length() == 0) { return false; } Logger *log = Logger::getLogger(); Document doc; doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); log->error("%s error in service token verification: %s\n", httpError?"HTTP error during":"Failed to parse result of", response.c_str()); return false; } // Check JSON error item if (doc.HasMember("error")) { if (doc["error"].IsString()) { string error = doc["error"].GetString(); log->error("Failed to parse token verification result, error %s", error.c_str()); } else { log->error("Failed to parse token verification result: %s", response.c_str()); } return false; } // Check JSON 
claim items if (doc.HasMember("aud") && doc.HasMember("sub") && doc.HasMember("iss") && doc.HasMember("exp")) { // Set token claims in the input map if (doc["aud"].IsString() && doc["sub"].IsString() && doc["iss"].IsString() && doc["exp"].IsUint()) { // Valid data: set claim values, expiration and verified m_audience = doc["aud"].GetString(); m_subject = doc["sub"].GetString(); m_issuer = doc["iss"].GetString(); m_expiration = doc["exp"].GetUint(); m_verified = true; log->debug("Token verified %s:%s, expiration %ld", m_audience.c_str(), m_subject.c_str(), m_expiration); } else { log->error("Token claims do not contain valid values: %s", response.c_str()); } } else { log->error("Needed token claims not found: %s", response.c_str()); } return m_verified; } ================================================ FILE: C/common/config_category.cpp ================================================ /* * Fledge category management * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <config_category.h> #include <string> #include <rapidjson/document.h> #include <rapidjson/ostreamwrapper.h> #include <rapidjson/writer.h> #include "rapidjson/error/error.h" #include "rapidjson/error/en.h" #include <sstream> #include <iostream> #include <time.h> #include <stdlib.h> #include <logger.h> #include <stdexcept> #include <string_utils.h> #include <boost/algorithm/string/replace.hpp> using namespace std; using namespace rapidjson; /** * ConfigCategories constructor without parameters * * Elements can be added with ConfigCategories::addCategoryDescription */ ConfigCategories::ConfigCategories() { } /** * Construct a ConfigCategories object from a JSON document returned from * the Fledge configuratrion service. 
*/ ConfigCategories::ConfigCategories(const std::string& json) { Document doc; doc.Parse(json.c_str()); if (doc.HasParseError()) { Logger::getLogger()->error("Configuration parse error in %s: %s at %d, '%s'", json.c_str(), GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset(), StringAround(json, (unsigned)doc.GetErrorOffset()).c_str()); throw new ConfigMalformed(); } if (doc.HasMember("categories")) { const Value& categories = doc["categories"]; if (categories.IsArray()) { // Process every rows and create the result set for (auto& cat : categories.GetArray()) { if (!cat.IsObject()) { throw new ConfigMalformed(); } ConfigCategoryDescription *value = new ConfigCategoryDescription(cat["key"].GetString(), cat["description"].GetString()); m_categories.push_back(value); } } else { throw new ConfigMalformed(); } } } /** * ConfigCategories destructor */ ConfigCategories::~ConfigCategories() { for (auto it = m_categories.cbegin(); it != m_categories.cend(); it++) { delete *it; } } /** * Add a ConfigCategoryDescription element * * @param elem The ConfigCategoryDescription elemen to add */ void ConfigCategories::addCategoryDescription(ConfigCategoryDescription* elem) { m_categories.push_back(elem); } /** * Return the JSON string of a ConfigCategoryDescription element */ string ConfigCategoryDescription::toJSON() const { ostringstream convert; convert << "{\"key\": \"" << JSONescape(m_name) << "\", "; convert << "\"description\" : \"" << JSONescape(m_description) << "\"}"; return convert.str(); } /** * Return the JSON string of all ConfigCategoryDescription * elements in m_categories */ string ConfigCategories::toJSON() const { ostringstream convert; convert << "["; for (auto it = m_categories.cbegin(); it != m_categories.cend(); it++) { convert << (*it)->toJSON(); if (it + 1 != m_categories.cend() ) { convert << ", "; } } convert << "]"; return convert.str(); } /** * Configuration Category constructor * * @param name The name of the configuration category * 
@param json	JSON content of the configuration category
 */
ConfigCategory::ConfigCategory(const string& name, const string& json) : m_name(name)
{
	Document doc;
	doc.Parse(json.c_str());
	if (doc.HasParseError())
	{
		Logger::getLogger()->error("Configuration parse error in category '%s', %s: %s at %d, '%s'",
				name.c_str(), json.c_str(),
				GetParseError_En(doc.GetParseError()),
				(unsigned)doc.GetErrorOffset(),
				StringAround(json, (unsigned)doc.GetErrorOffset()).c_str());
		throw new ConfigMalformed();
	}
	for (Value::ConstMemberIterator itr = doc.MemberBegin(); itr != doc.MemberEnd(); ++itr)
	{
		try
		{
			m_items.push_back(new CategoryItem(itr->name.GetString(), itr->value));
		}
		catch (exception* e)
		{
			Logger::getLogger()->error("Configuration parse error in category '%s' item '%s', %s: %s",
					name.c_str(), itr->name.GetString(), json.c_str(), e->what());
			delete e;
			// Throw by pointer for consistency with the parse error
			// path above; callers catch ConfigMalformed by pointer
			// and previously missed this by-value throw
			throw new ConfigMalformed();
		}
		catch (...)
		{
			throw;
		}
	}
}

/**
 * Copy constructor for a configuration category
 *
 * @param rhs	The configuration category to copy
 */
ConfigCategory::ConfigCategory(ConfigCategory const& rhs)
{
	m_name = rhs.m_name;
	m_description = rhs.m_description;
	for (auto it = rhs.m_items.cbegin(); it != rhs.m_items.cend(); it++)
	{
		m_items.push_back(new CategoryItem(**it));
	}
}

/**
 * Copy constructor for a configuration category when copying from a pointer
 *
 * @param rhs	The configuration category to copy
 */
ConfigCategory::ConfigCategory(ConfigCategory const *rhs)
{
	m_name = rhs->m_name;
	m_description = rhs->m_description;
	for (auto it = rhs->m_items.cbegin(); it != rhs->m_items.cend(); it++)
	{
		m_items.push_back(new CategoryItem(**it));
	}
}

/**
 * Configuration category destructor
 */
ConfigCategory::~ConfigCategory()
{
	for (auto it = m_items.cbegin(); it != m_items.cend(); it++)
	{
		delete *it;
	}
}

/**
 * Operator= for ConfigCategory
 */
ConfigCategory& ConfigCategory::operator=(ConfigCategory const& rhs)
{
	m_name = rhs.m_name;
	m_description = rhs.m_description;
	for (auto it = m_items.cbegin(); it != m_items.cend();
	     it++)
	{
		delete *it;
	}
	m_items.clear();
	for (auto it = rhs.m_items.cbegin(); it != rhs.m_items.cend(); it++)
	{
		m_items.push_back(new CategoryItem(**it));
	}
	return *this;
}

/**
 * Operator+= for ConfigCategory
 *
 * NOTE(review): this appends copies of the rhs items to the existing
 * item list and overwrites the category name and description with those
 * of rhs; duplicate item names are not detected here.
 */
ConfigCategory& ConfigCategory::operator+=(ConfigCategory const& rhs)
{
	m_name = rhs.m_name;
	m_description = rhs.m_description;
	for (auto it = rhs.m_items.cbegin(); it != rhs.m_items.cend(); it++)
	{
		m_items.push_back(new CategoryItem(**it));
	}
	return *this;
}

/**
 * Set the m_value from m_default for each item
 */
void ConfigCategory::setItemsValueFromDefault()
{
	for (auto it = m_items.cbegin(); it != m_items.cend(); it++)
	{
		(*it)->m_value = string((*it)->m_default);
	}
}

/**
 * Check whether at least one item in the category object
 * has both 'value' and 'default' set.
 *
 * @throws ConfigValueFoundWithDefault
 */
void ConfigCategory::checkDefaultValuesOnly() const
{
	for (auto it = m_items.cbegin(); it != m_items.cend(); it++)
	{
		if (!(*it)->m_value.empty())
		{
			// Thrown by pointer; callers catch ConfigValueFoundWithDefault*
			throw new ConfigValueFoundWithDefault((*it)->m_name);
		}
	}
}

/**
 * Add an item to a configuration category
 *
 * @param name		The item name
 * @param description	The item description
 * @param type		The item type
 * @param def		The item default value
 * @param value	The item value
 */
void ConfigCategory::addItem(const std::string& name, const std::string description,
			     const std::string& type, const std::string def,
			     const std::string& value)
{
	m_items.push_back(new CategoryItem(name, description, type, def, value));
}

/**
 * Add an enumeration item to a configuration category
 *
 * @param name		The item name
 * @param description	The item description
 * @param def		The item default value
 * @param value	The item value
 * @param options	The set of options for the enumeration
 */
void ConfigCategory::addItem(const std::string& name, const std::string description,
			     const std::string def, const std::string& value,
			     const vector<string> options)
{
	m_items.push_back(new CategoryItem(name, description, def, value, options));
}

/**
 * Set the display name of an item
 *
 * @param name		The item name in the category
 * @param displayName	The display name to set
 * @return true if the item was found
 */
bool ConfigCategory::setItemDisplayName(const std::string& name, const std::string& displayName)
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
m_items[i]->m_displayName = displayName; return true; } } return false; } /** * Delete all the items from the configuration category having a specific type * * * @param type Type to delete */ void ConfigCategory::removeItemsType(ConfigCategory::ItemType type) { for (auto it = m_items.begin(); it != m_items.end(); ) { if ((*it)->m_itemType == type) { delete *it; m_items.erase(it); } else { ++it; } } } /** * Delete all the items from the configuration category * */ void ConfigCategory::removeItems() { for (auto it = m_items.begin(); it != m_items.end(); ) { delete *it; m_items.erase(it); } } /** * Delete all the items from the configuration category not having a specific type * * * @param type Type to maintain */ void ConfigCategory::keepItemsType(ConfigCategory::ItemType type) { for (auto it = m_items.begin(); it != m_items.end(); ) { if ((*it)->m_itemType != type) { delete *it; m_items.erase(it); } else { ++it; } } } /** * Extracts, process and adds subcategory information from a given category to the current instance * * * @param subCategories Configuration category from which the subcategories information should be extracted */ bool ConfigCategory::extractSubcategory(ConfigCategory &subCategories) { bool extracted; auto it = subCategories.m_items.begin(); if (it != subCategories.m_items.end()) { // Generates a new temporary category from the JSON in m_default ConfigCategory tmpCategory = ConfigCategory("tmpCategory", (*it)->m_default); // Extracts all the items generated from m_default and adds them to the category for(auto item : tmpCategory.m_items) { m_items.push_back(new CategoryItem(*item)); } m_name = (*it)->m_name; m_description = (*it)->m_description; // Replaces the %N escape sequence with the instance name of this plugin string instanceName = subCategories.m_name; string pattern = "%N"; if (m_name.find(pattern) != string::npos) m_name.replace(m_name.find(pattern), pattern.length(), instanceName); // Removes the element just processed delete *it; 
		subCategories.m_items.erase(it);

		extracted = true;
	}
	else
	{
		extracted = false;
	}

	return extracted;
}

/**
 * Check for the existence of an item within the configuration category
 *
 * @param name	Item name to check within the category
 */
bool ConfigCategory::itemExists(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return true;
		}
	}
	return false;
}

/**
 * Return the value of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return string	The configuration item value
 * @throws exception if the item does not exist in the category
 */
string ConfigCategory::getValue(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_value;
		}
	}
	// Thrown by pointer; callers catch ConfigItemNotFound*
	throw new ConfigItemNotFound();
}

/**
 * Return the value of the configuration category item with a default
 *
 * @param name		The name of the configuration item to return
 * @param defaultValue	The default value to return if the item does not exist
 * @return string	The configuration item value or the default
 */
string ConfigCategory::getValue(const std::string& name, const std::string& defaultValue) const
{
	try
	{
		return getValue(name);
	}
	catch (ConfigItemNotFound* e)
	{
		Logger::getLogger()->info("'%s' %s , returning default value '%s'",
				name.c_str(), e->what(), defaultValue.c_str());
		delete e;
		return defaultValue;
	}
}

/**
 * Return a boolean value from a configuration category item
 *
 * @param name		The name of the item
 * @param defaultValue	The value to return if item is not found or invalid
 * @return bool	The boolean value
 */
bool ConfigCategory::getBoolValue(const std::string& name, bool defaultValue) const
{
	try
	{
		string val = getValue(name);
		// Accept "true"/"false" in any case and "1"/"0"
		std::string lower = val;
		std::transform(lower.begin(), lower.end(), lower.begin(), ::tolower);
		if (lower == "true" || lower == "1")
			return true;
		if (lower == "false" || lower == "0")
			return false;
		// Not a recognised boolean token; fall back to the default
		Logger::getLogger()->info("Config item '%s' expected to be boolean but got '%s'",
				name.c_str(), val.c_str());
		return defaultValue;
	}
	catch (ConfigItemNotFound* e)
	{
		Logger::getLogger()->info("'%s' %s , returning default value '%d'",
				name.c_str(), e->what(), defaultValue);
		delete e;
		return defaultValue;
	}
}

/**
 * Return an integer value from a configuration category item
 *
 * @param name		The name of the item
 * @param defaultValue	The value to return if item is not found or invalid
 * @return int	The integer value
 */
int ConfigCategory::getIntegerValue(const std::string& name, int defaultValue) const
{
	try
	{
		string val = getValue(name);
		return stoi(val);
	}
	catch (ConfigItemNotFound* e)
	{
		Logger::getLogger()->info("'%s' %s , returning default value '%d'",
				name.c_str(), e->what(), defaultValue);
		delete e;
		return defaultValue;
	}
	catch (std::invalid_argument& e)
	{
		Logger::getLogger()->info("Config item '%s' expected to be integer but got '%s', returning default value '%d'",
				name.c_str(), e.what(), defaultValue);
		return defaultValue;
	}
	catch (std::out_of_range& e)
	{
		Logger::getLogger()->info("Config item '%s' out of range: %s, returning default value '%d'",
				name.c_str(), e.what(), defaultValue);
		return defaultValue;
	}
}

/**
 * Return a long value from a configuration category item
 *
 * @param name		The name of the item
 * @param defaultValue	The value to return if item is not found or invalid
 * @return long	The long value
 */
long ConfigCategory::getLongValue(const std::string& name, long defaultValue) const
{
	try
	{
		string val = getValue(name);
		return stol(val);
	}
	catch (ConfigItemNotFound* e)
	{
		Logger::getLogger()->info("'%s' %s , returning default value '%ld'",
				name.c_str(), e->what(), defaultValue);
		delete e;
		return defaultValue;
	}
	catch (std::invalid_argument& e)
	{
		Logger::getLogger()->info("Config item '%s' expected to be long but got '%s', returning default value '%ld'",
				name.c_str(), e.what(), defaultValue);
		return defaultValue;
	}
	catch (std::out_of_range& e)
	{
		Logger::getLogger()->info("Config item '%s' out of range: %s, returning default value '%ld'",
				name.c_str(), e.what(), defaultValue);
		return defaultValue;
	}
}

/**
 * Return a double value from a configuration category item
 *
 * @param name		The name of the item
 * @param defaultValue	The value to return if item is not found or invalid
 * @return double	The double value
 */
double ConfigCategory::getDoubleValue(const std::string& name, double
defaultValue) const
{
	try
	{
		string val = getValue(name);
		return stod(val);
	}
	catch (ConfigItemNotFound* e)
	{
		// Use %f: defaultValue is a double, the previous %ld was
		// undefined behaviour in the varargs call
		Logger::getLogger()->info("'%s' %s , returning default value '%f'",
				name.c_str(), e->what(), defaultValue);
		delete e;
		return defaultValue;
	}
	catch (std::invalid_argument& e)
	{
		Logger::getLogger()->info("Config item '%s' expected to be double but got '%s', returning default value '%lf'",
				name.c_str(), e.what(), defaultValue);
		return defaultValue;
	}
	catch (std::out_of_range& e)
	{
		Logger::getLogger()->info("Config item '%s' out of range: %s, returning default value '%lf'",
				name.c_str(), e.what(), defaultValue);
		return defaultValue;
	}
}

/**
 * Return the value of the configuration category item list, this
 * is a convenience function used when simple lists are defined
 * and allows for central processing of the list values
 *
 * @param name	The name of the configuration item to return
 * @return vector<string>	The list values
 * @throws exception if the item does not exist in the category
 */
vector<string> ConfigCategory::getValueList(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			// Only items of type "list" may be fetched this way
			if (m_items[i]->m_type.compare("list"))
			{
				throw new ConfigItemNotAList();
			}
			Document d;
			vector<string> list;
			d.Parse(m_items[i]->m_value.c_str());
			if (d.HasParseError())
			{
				Logger::getLogger()->error("The JSON value for a list item %s has a parse error: %s, %s",
						name.c_str(), GetParseError_En(d.GetParseError()),
						m_items[i]->m_value.c_str());
				return list;
			}
			if (d.IsArray())
			{
				for (auto& v : d.GetArray())
				{
					if (v.IsString())
					{
						list.push_back(v.GetString());
					}
				}
			}
			else
			{
				Logger::getLogger()->error("The value of the list item %s should be a JSON array and it is not",
						name.c_str());
			}
			return list;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return the value of the configuration category item kvlist, this
 * is a convenience function used when key/value lists are defined
 * and allows for central processing of the list
 * values
 *
 * @param name	The name of the configuration item to return
 * @return string	The configuration item name
 * @throws exception if the item does not exist in the category
 */
map<string, string> ConfigCategory::getValueKVList(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			// Only items of type "kvlist" may be fetched this way
			if (m_items[i]->m_type.compare("kvlist"))
			{
				throw new ConfigItemNotAList();
			}
			map<string, string> list;
			Document d;
			d.Parse(m_items[i]->m_value.c_str());
			if (d.HasParseError())
			{
				Logger::getLogger()->error("The JSON value for a kvlist item %s has a parse error: %s, %s",
						name.c_str(), GetParseError_En(d.GetParseError()),
						m_items[i]->m_value.c_str());
				return list;
			}
			// NOTE(review): assumes the parsed document is a JSON object;
			// a non-object value would trip RapidJSON's GetObject() assertion
			for (auto& v : d.GetObject())
			{
				string key = v.name.GetString();
				string value = to_string(v.value);
				list.insert(pair<string, string>(key, value));
			}
			return list;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Convert a RapidJSON value to a string
 *
 * @param v	The RapidJSON value
 */
std::string ConfigCategory::to_string(const rapidjson::Value& v) const
{
	if (v.IsString())
	{
		return { v.GetString(), v.GetStringLength() };
	}
	else
	{
		// Serialise non-string values back to their JSON text form
		StringBuffer strbuf;
		Writer<rapidjson::StringBuffer> writer(strbuf);
		v.Accept(writer);
		return { strbuf.GetString(), strbuf.GetLength() };
	}
}

/**
 * Return the requested attribute of a configuration category item
 *
 * @param name		The name of the configuration item to return
 * @param itemAttribute	The item attribute (such as "file", "order", "readonly"
 * @return The configuration item attribute as string
 * @throws ConfigItemNotFound if the item does not exist in the category
 *	   ConfigItemAttributeNotFound if the requested attribute
 *	   does not exist for the found item.
*/ string ConfigCategory::getItemAttribute(const string& itemName, const ItemAttribute itemAttribute) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (itemName.compare(m_items[i]->m_name) == 0) { switch (itemAttribute) { case ORDER_ATTR: return m_items[i]->m_order; case READONLY_ATTR: return m_items[i]->m_readonly; case MANDATORY_ATTR: return m_items[i]->m_mandatory; case FILE_ATTR: return m_items[i]->m_file; case VALIDITY_ATTR: return m_items[i]->m_validity; case GROUP_ATTR: return m_items[i]->m_group; case DISPLAY_NAME_ATTR: return m_items[i]->m_displayName; case DEPRECATED_ATTR: return m_items[i]->m_deprecated; case RULE_ATTR: return m_items[i]->m_rule; case BUCKET_PROPERTIES_ATTR: return m_items[i]->m_bucketProperties; case LIST_SIZE_ATTR: return m_items[i]->m_listSize; case ITEM_TYPE_ATTR: return m_items[i]->m_listItemType; case LIST_NAME_ATTR: return m_items[i]->m_listName; case KVLIST_KEY_NAME_ATTR: return m_items[i]->m_kvlistKeyName; case KVLIST_KEY_DESCRIPTION_ATTR: return m_items[i]->m_kvlistKeyDescription; case JSON_SCHEMA_ATTR: return m_items[i]->m_jsonSchema; default: throw new ConfigItemAttributeNotFound(); } } } throw new ConfigItemNotFound(); } /** * Set the requested attribute of a configuration category item * * @param name The name of the configuration item to return * @param itemAttribute The item attribute (such as "file", "order", "readonly" * @param value The value to set * @return The configuration item attribute as string * @throws ConfigItemNotFound if the item does not exist in the category * ConfigItemAttributeNotFound if the requested attribute * does not exist for the found item. 
 */
bool ConfigCategory::setItemAttribute(const string& itemName,
				      const ItemAttribute itemAttribute,
				      const string& value)
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (itemName.compare(m_items[i]->m_name) == 0)
		{
			switch (itemAttribute)
			{
				case ORDER_ATTR:
					m_items[i]->m_order = value;
					return true;
				case READONLY_ATTR:
					m_items[i]->m_readonly = value;
					return true;
				case MANDATORY_ATTR:
					m_items[i]->m_mandatory = value;
					return true;
				case FILE_ATTR:
					m_items[i]->m_file = value;
					return true;
				case MINIMUM_ATTR:
					m_items[i]->m_minimum = value;
					return true;
				case MAXIMUM_ATTR:
					m_items[i]->m_maximum = value;
					return true;
				case LENGTH_ATTR:
					m_items[i]->m_length = value;
					return true;
				case VALIDITY_ATTR:
					m_items[i]->m_validity = value;
					return true;
				case GROUP_ATTR:
					m_items[i]->m_group = value;
					return true;
				case DISPLAY_NAME_ATTR:
					m_items[i]->m_displayName = value;
					return true;
				case DEPRECATED_ATTR:
					m_items[i]->m_deprecated = value;
					return true;
				case RULE_ATTR:
					m_items[i]->m_rule = value;
					return true;
				case BUCKET_PROPERTIES_ATTR:
					m_items[i]->m_bucketProperties = value;
					return true;
				case LIST_SIZE_ATTR:
					m_items[i]->m_listSize = value;
					return true;
				case ITEM_TYPE_ATTR:
					m_items[i]->m_listItemType = value;
					return true;
				case LIST_NAME_ATTR:
					m_items[i]->m_listName = value;
					return true;
				case KVLIST_KEY_NAME_ATTR:
					m_items[i]->m_kvlistKeyName = value;
					return true;
				case KVLIST_KEY_DESCRIPTION_ATTR:
					m_items[i]->m_kvlistKeyDescription = value;
					return true;
				case JSON_SCHEMA_ATTR:
					m_items[i]->m_jsonSchema = value;
					return true;
				default:
					// Unknown attribute: report failure rather than throw
					return false;
			}
		}
	}
	return false;
}

/**
 * Return the type of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return string	The configuration item type
 * @throws exception if the item does not exist in the category
 */
string ConfigCategory::getType(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_type;
		}
	}
	// Thrown by pointer; callers catch ConfigItemNotFound*
	throw new
	      ConfigItemNotFound();
}

/**
 * Return the description of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return string	The configuration item description
 * @throws exception if the item does not exist in the category
 */
string ConfigCategory::getDescription(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_description;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return the default value of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return string	The configuration item default value
 * @throws exception if the item does not exist in the category
 */
string ConfigCategory::getDefault(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_default;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Update the default value of the configuration category item
 *
 * @param name	The name of the configuration item to update
 * @param value	New value of the configuration item
 * @return bool	Whether update succeeded
 */
bool ConfigCategory::setDefault(const string& name, const string& value)
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			m_items[i]->m_default = value;
			return true;
		}
	}
	return false;
}

/**
 * Update the value of the configuration category item
 *
 * @param name	The name of the configuration item to update
 * @param value	New value of the configuration item
 * @return bool	Whether update succeeded
 */
bool ConfigCategory::setValue(const string& name, const string& value)
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			m_items[i]->m_value = value;
			return true;
		}
	}
	return false;
}

/**
 * Return the display name of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return
 *	string The configuration item name
 * @throws exception if the item does not exist in the category
 */
string ConfigCategory::getDisplayName(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_displayName;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return the length value of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return string	The configuration item length attribute
 * @throws exception if the item does not exist in the category
 */
string ConfigCategory::getLength(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_length;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return the minimum value of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return string	The configuration item minimum attribute
 * @throws exception if the item does not exist in the category
 */
string ConfigCategory::getMinimum(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_minimum;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return the maximum of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return string	The configuration item maximum attribute
 * @throws exception if the item does not exist in the category
 */
string ConfigCategory::getMaximum(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_maximum;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return the options of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return vector<string>	The configuration item options
 * @throws exception if the item does not exist in the category
 */
vector<string>
ConfigCategory::getOptions(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_options;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return the permissions of the configuration category item
 *
 * @param name	The name of the configuration item to return
 * @return vector<string>	The configuration item permissions
 * @throws exception if the item does not exist in the category
 */
vector<string> ConfigCategory::getPermissions(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_permissions;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return true if the user has permission to update the named item
 *
 * @param name		The name of the configuration item to return
 * @param rolename	The name of the user role to test
 * @return bool	True if the named user can update the configuration item
 * @throws exception if the item does not exist in the category
 */
bool ConfigCategory::hasPermission(const std::string& name, const std::string& rolename) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			// No permissions attached means every role may update the item
			if (m_items[i]->m_permissions.empty())
				return true;
			for (auto& perm : m_items[i]->m_permissions)
				if (rolename.compare(perm) == 0)
					return true;
			return false;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return if the configuration item is a string item
 *
 * @param name	The name of the item to test
 * @return bool	True if the item is a string type
 * @throws exception If the item was not found in the configuration category
 */
bool ConfigCategory::isString(const string& name) const
{
	for (unsigned int i = 0; i < m_items.size(); i++)
	{
		if (name.compare(m_items[i]->m_name) == 0)
		{
			return m_items[i]->m_itemType == StringItem;
		}
	}
	throw new ConfigItemNotFound();
}

/**
 * Return if the configuration item is an enumeration item
 *
 * @param name	The name of the item to test
 *
@return bool True if the item is a string type * @throws exception If the item was not found in the configuration category */ bool ConfigCategory::isEnumeration(const string& name) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (name.compare(m_items[i]->m_name) == 0) { return m_items[i]->m_itemType == EnumerationItem; } } throw new ConfigItemNotFound(); } /** * Return if the configuration item is a JSON item * * @param name The name of the item to test * @return bool True if the item is a JSON type * @throws exception If the item was not found in the configuration category */ bool ConfigCategory::isJSON(const string& name) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (name.compare(m_items[i]->m_name) == 0) { return m_items[i]->m_itemType == JsonItem; } } throw new ConfigItemNotFound(); } /** * Return if the configuration item is a Bool item * * @param name The name of the item to test * @return bool True if the item is a Bool type * @throws exception If the item was not found in the configuration category */ bool ConfigCategory::isBool(const string& name) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (name.compare(m_items[i]->m_name) == 0) { return m_items[i]->m_itemType == BoolItem; } } throw new ConfigItemNotFound(); } /** * Return if the configuration item is a Numeric item * * @param name The name of the item to test * @return bool True if the item is a Numeric type * @throws exception If the item was not found in the configuration category */ bool ConfigCategory::isNumber(const string& name) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (name.compare(m_items[i]->m_name) == 0) { return m_items[i]->m_itemType == NumberItem; } } throw new ConfigItemNotFound(); } /** * Return if the configuration item is a Double item * * @param name The name of the item to test * @return bool True if the item is a Double type * @throws exception If the item was not found in the configuration category */ bool 
ConfigCategory::isDouble(const string& name) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (name.compare(m_items[i]->m_name) == 0) { return m_items[i]->m_itemType == DoubleItem; } } throw new ConfigItemNotFound(); } /** * Return if the configuration item is deprecated a item * * @param name The name of the item to test * @return bool True if the item is a deprecated type * @throws exception If the item was not found in the configuration category */ bool ConfigCategory::isDeprecated(const string& name) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (name.compare(m_items[i]->m_name) == 0) { return ! m_items[i]->m_deprecated.empty(); } } throw new ConfigItemNotFound(); } /** * Return if the configuration item is a list item * * @param name The name of the item to test * @return bool True if the item is a Numeric type * @throws exception If the item was not found in the configuration category */ bool ConfigCategory::isList(const string& name) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (name.compare(m_items[i]->m_name) == 0) { return (m_items[i]->m_type.compare("list") == 0); } } throw new ConfigItemNotFound(); } /** * Return if the configuration item is a kvlist item * * @param name The name of the item to test * @return bool True if the item is a Numeric type * @throws exception If the item was not found in the configuration category */ bool ConfigCategory::isKVList(const string& name) const { for (unsigned int i = 0; i < m_items.size(); i++) { if (name.compare(m_items[i]->m_name) == 0) { return (m_items[i]->m_type.compare("kvlist") == 0); } } throw new ConfigItemNotFound(); } /** * Set the description for the configuration category * * @param description The configuration category description */ void ConfigCategory::setDescription(const string& description) { m_description = description; } /** * Return JSON string of all category components * * @param full false is the deafult, true evaluates all the members of the 
CategoryItems * */ string ConfigCategory::toJSON(const bool full) const { ostringstream convert; convert << "{ \"key\" : \"" << JSONescape(m_name) << "\", "; convert << "\"description\" : \"" << JSONescape(m_description) << "\", \"value\" : "; // Add items convert << ConfigCategory::itemsToJSON(full); convert << " }"; return convert.str(); } /** * Return JSON string of category items only * * @param full false is the deafult, true evaluates all the members of the CategoryItems * */ string ConfigCategory::itemsToJSON(const bool full) const { ostringstream convert; convert << "{"; for (auto it = m_items.cbegin(); it != m_items.cend(); it++) { convert << (*it)->toJSON(full); if (it + 1 != m_items.cend() ) { convert << ", "; } } convert << "}"; return convert.str(); } /** * Constructor for a configuration item * @param name The category item name * @param item The item object to add * @throw ConfigMalformed exception * @throw runtime_error exception */ ConfigCategory::CategoryItem::CategoryItem(const string& name, const Value& item) { m_name = name; m_itemType = UnknownType; if (! 
item.IsObject()) { throw new ConfigMalformed(); } if (item.HasMember("type")) { m_type = item["type"].GetString(); } else { m_type = ""; } if (item.HasMember("description")) { m_description = item["description"].GetString(); } else { m_description = ""; } if (item.HasMember("order")) { m_order = item["order"].GetString(); } else { m_order = ""; } if (item.HasMember("length")) { m_length = item["length"].GetString(); } else { m_length = ""; } if (item.HasMember("minimum")) { m_minimum = item["minimum"].GetString(); } else { m_minimum = ""; } if (item.HasMember("maximum")) { m_maximum = item["maximum"].GetString(); } else { m_maximum = ""; } if (item.HasMember("file")) { m_file = item["file"].GetString(); } else { m_file = ""; } if (item.HasMember("readonly")) { m_readonly = item["readonly"].GetString(); } else { m_readonly = ""; } if (item.HasMember("mandatory")) { m_mandatory = item["mandatory"].GetString(); } else { m_mandatory = ""; } if (m_type.compare("category") == 0) { m_itemType = CategoryType; } if (m_type.compare("script") == 0) { m_itemType = ScriptItem; } if (m_type.compare("code") == 0) { m_itemType = CodeItem; } if (m_type.compare("bucket") == 0) { m_itemType = BucketItem; } if (m_type.compare("list") == 0) { m_itemType = ListItem; } if (m_type.compare("kvlist") == 0) { m_itemType = KVListItem; } if (item.HasMember("deprecated")) { m_deprecated = item["deprecated"].GetString(); } else { m_deprecated = ""; } if (item.HasMember("displayName")) { m_displayName = item["displayName"].GetString(); } else { m_displayName = ""; } if (item.HasMember("validity")) { m_validity = item["validity"].GetString(); } else { m_validity = ""; } if (item.HasMember("group")) { m_group = item["group"].GetString(); } else { m_group = ""; } if (item.HasMember("rule")) { m_rule = item["rule"].GetString(); } else { m_rule = ""; } if (item.HasMember("properties")) { Logger::getLogger()->debug("item['properties'].IsString()=%s, item['properties'].IsObject()=%s", 
item["properties"].IsString()?"true":"false", item["properties"].IsObject()?"true":"false"); rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["properties"].Accept(writer); m_bucketProperties = item["properties"].IsObject() ? // use current string strbuf.GetString() : // Unescape the string JSONunescape(strbuf.GetString()); Logger::getLogger()->debug("m_bucketProperties=%s", m_bucketProperties.c_str()); } else { m_bucketProperties = ""; } if (m_itemType == BucketItem && m_bucketProperties.empty()) { throw new runtime_error("Bucket configuration item is missing the \"properties\" attribute"); } if (item.HasMember("options")) { const Value& options = item["options"]; if (options.IsArray()) { for (SizeType i = 0; i < options.Size(); i++) { m_options.push_back(string(options[i].GetString())); } } } if (item.HasMember("permissions")) { const Value& permissions = item["permissions"]; if (permissions.IsArray()) { for (SizeType i = 0; i < permissions.Size(); i++) { m_permissions.push_back(string(permissions[i].GetString())); } } } if (item.HasMember("schema")) { Logger::getLogger()->debug("item['schema'].IsString()=%s, item['schema'].IsObject()=%s", item["schema"].IsString()?"true":"false", item["schema"].IsObject()?"true":"false"); rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["schema"].Accept(writer); m_jsonSchema = item["schema"].IsObject() ? 
// use current string strbuf.GetString() : // Unescape the string JSONunescape(strbuf.GetString()); Logger::getLogger()->debug("m_jsonSchema=%s", m_jsonSchema.c_str()); } else { m_jsonSchema = ""; } if (item.HasMember("items")) { if (item["items"].IsString()) { m_listItemType = item["items"].GetString(); } else { throw new runtime_error("Items configuration item property is not a string"); } } else if (m_itemType == ListItem || m_itemType == KVListItem) { throw new runtime_error("List configuration item is missing the \"items\" attribute"); } if (item.HasMember("listSize")) { if (item["listSize"].IsString()) { m_listSize = item["listSize"].GetString(); } else { throw new runtime_error("ListSize configuration item property is not a string"); } } if (item.HasMember("listName")) { if (item["listName"].IsString()) { m_listName = item["listName"].GetString(); } else { throw new runtime_error("ListName configuration item property is not a string"); } } if (item.HasMember("keyName")) { if (item["keyName"].IsString()) { m_kvlistKeyName = item["keyName"].GetString(); } else { throw new runtime_error("keyName configuration item property is not a string"); } } if (item.HasMember("keyDescription")) { if (item["keyDescription"].IsString()) { m_kvlistKeyDescription = item["keyDescription"].GetString(); } else { throw new runtime_error("keyDescription configuration item property is not a string"); } } std::string m_typeUpperCase = m_type; for (auto & c: m_typeUpperCase) c = toupper(c); // Item "value" can be an escaped JSON string, so check m_type JSON as well if (item.HasMember("value") && (item["value"].IsObject() || m_typeUpperCase.compare("JSON") == 0)) { rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["value"].Accept(writer); m_value = item["value"].IsObject() ? 
// use current string strbuf.GetString() : // Unescape the string JSONunescape(strbuf.GetString()); // If it's not a real eject, check the string buffer it is: if (!item["value"].IsObject()) { boost::replace_all(m_value, "\\n", ""); Document check; check.Parse(m_value.c_str()); if (check.HasParseError()) { Logger::getLogger()->error("The JSON configuration item %s has a parse error: %s", m_name.c_str(), GetParseError_En(check.GetParseError())); throw new runtime_error(GetParseError_En(check.GetParseError())); } if (!check.IsObject()) { Logger::getLogger()->error("The JSON configuration item %s is not a valid JSON objects", m_name.c_str()); throw new runtime_error("'value' JSON property is not an object"); } } if (m_typeUpperCase.compare("JSON") == 0) { m_itemType = JsonItem; } else { // Avoids overwrite if it is already valued if (m_itemType == StringItem) { m_itemType = JsonItem; } } } // Item "value" is a Bool or m_type is boolean else if (item.HasMember("value") && (item["value"].IsBool() || m_type.compare("boolean") == 0)) { m_value = !item["value"].IsBool() ? // use string value item["value"].GetString() : // use bool value item["value"].GetBool() ? 
"true" : "false"; m_itemType = BoolItem; } // Item "value" is just a string else if (item.HasMember("value") && item["value"].IsString()) { // Get content of script type item as is rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["value"].Accept(writer); if (m_itemType == ScriptItem || m_itemType == CodeItem) { m_value = strbuf.GetString(); if (m_value.empty()) { m_value = "\"\""; } } else { m_value = JSONunescape(strbuf.GetString()); if (m_options.size() == 0) m_itemType = StringItem; else m_itemType = EnumerationItem; } } // Item "value" is a Double else if (item.HasMember("value") && item["value"].IsDouble()) { rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["value"].Accept(writer); m_value = strbuf.GetString(); m_itemType = DoubleItem; } // Item "value" is a Number else if (item.HasMember("value") && item["value"].IsNumber()) { // Don't check Uint/Int/Long etc: just get the string value rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["value"].Accept(writer); m_value = strbuf.GetString(); m_itemType = NumberItem; } // Item "value" has an unknwon type so far: set empty string else { m_value = ""; } // Item "default" can be an escaped JSON string, so check m_type JSON as well if (item.HasMember("default") && (item["default"].IsObject() || m_typeUpperCase.compare("JSON") == 0)) { rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["default"].Accept(writer); m_default = item["default"].IsObject() ? 
// use current string strbuf.GetString() : // Unescape the string JSONunescape(strbuf.GetString()); // If it's not a real eject, check the string buffer it is: if (!item["default"].IsObject()) { boost::replace_all(m_default, "\\n", ""); Document check; check.Parse(m_default.c_str()); if (check.HasParseError()) { Logger::getLogger()->error("The JSON configuration item %s has a parse error in the default value: %s", m_name.c_str(), GetParseError_En(check.GetParseError())); throw new runtime_error(GetParseError_En(check.GetParseError())); } if (!check.IsObject()) { Logger::getLogger()->error("The JSON configuration item %s default is not a valid JSON object", m_name.c_str()); throw new runtime_error("'default' JSON property is not an object"); } } if (m_typeUpperCase.compare("JSON") == 0) { m_itemType = JsonItem; } else { // Avoids overwrite if it is already valued if (m_itemType == StringItem) { m_itemType = JsonItem; } } } // Item "default" is a Bool or m_type is boolean else if (item.HasMember("default") && (item["default"].IsBool() || m_type.compare("boolean") == 0)) { m_default = !item["default"].IsBool() ? // use string value item["default"].GetString() : // use bool value item["default"].GetBool() ? 
"true" : "false"; m_itemType = BoolItem; } // Item "default" is just a string else if (item.HasMember("default") && item["default"].IsString()) { // Get content of script type item as is rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["default"].Accept(writer); if (m_itemType == ScriptItem || m_itemType == CodeItem) { m_default = strbuf.GetString(); if (m_default.empty()) { m_default = "\"\""; } } else { m_default = JSONunescape(strbuf.GetString()); if (m_options.size() == 0) m_itemType = StringItem; else m_itemType = EnumerationItem; } } // Item "default" is a Double else if (item.HasMember("default") && item["default"].IsDouble()) { rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["default"].Accept(writer); m_default = strbuf.GetString(); m_itemType = DoubleItem; } // Item "default" is a Number else if (item.HasMember("default") && item["default"].IsNumber()) { // Don't check Uint/Int/Long etc: just get the string value rapidjson::StringBuffer strbuf; rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf); item["default"].Accept(writer); m_default = strbuf.GetString(); m_itemType = NumberItem; } else // Item "default" has an unknwon type so far: set empty string { m_default = ""; } } /** * Constructor for a configuration item */ ConfigCategory::CategoryItem::CategoryItem(const string& name, const std::string& description, const std::string& type, const std::string def, const std::string& value) { m_name = name; m_description = description; m_type = type; m_default = def; m_value = value; m_itemType = StringItem; } /** * Constructor for a configuration item */ ConfigCategory::CategoryItem::CategoryItem(const string& name, const std::string& description, const std::string def, const std::string& value, const vector<string> options) { m_name = name; m_description = description; m_type = "enumeration"; m_default = def; m_value = value; m_itemType = StringItem; for (auto 
it = options.cbegin(); it != options.cend(); it++) { m_options.push_back(*it); } } /** * Copy constructor for configuration item */ ConfigCategory::CategoryItem::CategoryItem(const CategoryItem& rhs) { m_name = rhs.m_name; m_displayName = rhs.m_displayName; m_type = rhs.m_type; m_default = rhs.m_default; m_value = rhs.m_value; m_description = rhs.m_description; m_order = rhs.m_order; m_readonly = rhs.m_readonly; m_mandatory = rhs.m_mandatory; m_deprecated = rhs.m_deprecated; m_length = rhs.m_length; m_minimum = rhs.m_minimum; m_maximum = rhs.m_maximum; m_filename = rhs.m_filename; for (auto it = rhs.m_options.cbegin(); it != rhs.m_options.cend(); it++) { m_options.push_back(*it); } m_file = rhs.m_file; m_itemType = rhs.m_itemType; m_validity = rhs.m_validity; m_group = rhs.m_group; m_rule = rhs.m_rule; m_bucketProperties = rhs.m_bucketProperties; m_listSize = rhs.m_listSize; m_listItemType = rhs.m_listItemType; m_listName = rhs.m_listName; m_kvlistKeyName = rhs.m_kvlistKeyName; m_kvlistKeyDescription = rhs.m_kvlistKeyDescription; for (auto it = rhs.m_permissions.cbegin(); it != rhs.m_permissions.cend(); it++) { m_permissions.push_back(*it); } m_jsonSchema = rhs.m_jsonSchema; } /** * Create a JSON representation of the configuration item * * @param full false is the default, true evaluates all the members of the CategoryItem * */ string ConfigCategory::CategoryItem::toJSON(const bool full) const { ostringstream convert; convert << "\"" << JSONescape(m_name) << "\" : { "; convert << "\"description\" : \"" << JSONescape(m_description) << "\", "; if (! 
m_displayName.empty()) { convert << "\"displayName\" : \"" << m_displayName << "\", "; } convert << "\"type\" : \"" << m_type << "\", "; if (m_options.size() > 0) { convert << "\"options\" : [ "; for (int i = 0; i < m_options.size(); i++) { if (i > 0) convert << ","; convert << "\"" << m_options[i] << "\""; } convert << "], "; } if (m_permissions.size() > 0) { convert << "\"permissions\" : [ "; for (int i = 0; i < m_permissions.size(); i++) { if (i > 0) convert << ","; convert << "\"" << m_permissions[i] << "\""; } convert << "], "; } if (m_itemType == StringItem || m_itemType == BoolItem || m_itemType == EnumerationItem || m_itemType == BucketItem || m_itemType == ListItem || m_itemType == KVListItem) { convert << "\"value\" : \"" << JSONescape(m_value) << "\", "; convert << "\"default\" : \"" << JSONescape(m_default) << "\""; } else if (m_itemType == JsonItem || m_itemType == NumberItem || m_itemType == DoubleItem || m_itemType == ScriptItem || m_itemType == CodeItem) { convert << "\"value\" : " << m_value << ", "; convert << "\"default\" : " << m_default; } else { Logger::getLogger()->error("Unknown item type in configuration category"); } if (full) { if (!m_order.empty()) { convert << ", \"order\" : \"" << m_order << "\""; } if (!m_length.empty()) { convert << ", \"length\" : \"" << m_length << "\""; } if (!m_minimum.empty()) { convert << ", \"minimum\" : \"" << m_minimum << "\""; } if (!m_maximum.empty()) { convert << ", \"maximum\" : \"" << m_maximum << "\""; } if (!m_readonly.empty()) { convert << ", \"readonly\" : \"" << m_readonly << "\""; } if (!m_mandatory.empty()) { convert << ", \"mandatory\" : \"" << m_mandatory << "\""; } if (!m_validity.empty()) { convert << ", \"validity\" : \"" << JSONescape(m_validity) << "\""; } if (!m_rule.empty()) { convert << ", \"rule\" : \"" << JSONescape(m_rule) << "\""; } if (!m_bucketProperties.empty()) { convert << ", \"properties\" : " << m_bucketProperties; } if (!m_group.empty()) { convert << ", \"group\" : \"" << 
m_group << "\""; } if (!m_file.empty()) { convert << ", \"file\" : \"" << m_file << "\""; } if (!m_listSize.empty()) { convert << ", \"listSize\" : \"" << m_listSize << "\""; } if (!m_listItemType.empty()) { convert << ", \"items\" : \"" << m_listItemType << "\""; } if (!m_listName.empty()) { convert << ", \"listName\" : \"" << m_listName << "\""; } if (!m_kvlistKeyName.empty()) { convert << ", \"keyName\" : \"" << m_kvlistKeyName << "\""; } if (!m_kvlistKeyDescription.empty()) { convert << ", \"keyDescription\" : \"" << m_kvlistKeyDescription << "\""; } if (!m_jsonSchema.empty()) { convert << ", \"schema\" : " << m_jsonSchema; } } convert << " }"; return convert.str(); } /** * Return only "default" item values */ string ConfigCategory::CategoryItem::defaultToJSON() const { ostringstream convert; convert << "\"" << JSONescape(m_name) << "\" : { "; convert << "\"description\" : \"" << JSONescape(m_description) << "\", "; convert << "\"type\" : \"" << m_type << "\""; if (!m_order.empty()) { convert << ", \"order\" : \"" << m_order << "\""; } if (!m_displayName.empty()) { convert << ", \"displayName\" : \"" << m_displayName << "\""; } if (!m_length.empty()) { convert << ", \"length\" : \"" << m_length << "\""; } if (!m_minimum.empty()) { convert << ", \"minimum\" : \"" << m_minimum << "\""; } if (!m_maximum.empty()) { convert << ", \"maximum\" : \"" << m_maximum << "\""; } if (!m_readonly.empty()) { convert << ", \"readonly\" : \"" << m_readonly << "\""; } if (!m_mandatory.empty()) { convert << ", \"mandatory\" : \"" << m_mandatory << "\""; } if (!m_validity.empty()) { convert << ", \"validity\" : \"" << JSONescape(m_validity) << "\""; } if (!m_rule.empty()) { convert << ", \"rule\" : \"" << JSONescape(m_rule) << "\""; } if (!m_bucketProperties.empty()) { convert << ", \"properties\" : " << m_bucketProperties; } if (!m_group.empty()) { convert << ", \"group\" : \"" << m_group << "\""; } if (!m_file.empty()) { convert << ", \"file\" : \"" << m_file << "\""; } if 
(m_options.size() > 0) { convert << ", \"options\" : [ "; for (int i = 0; i < m_options.size(); i++) { if (i > 0) convert << ","; convert << "\"" << m_options[i] << "\""; } convert << "]"; } if (m_permissions.size() > 0) { convert << ", \"permissions\" : [ "; for (int i = 0; i < m_permissions.size(); i++) { if (i > 0) convert << ","; convert << "\"" << m_permissions[i] << "\""; } convert << "]"; } if (!m_listSize.empty()) { convert << ", \"listSize\" : \"" << m_listSize << "\""; } if (!m_listItemType.empty()) { convert << ", \"items\" : \"" << m_listItemType << "\""; } if (!m_listName.empty()) { convert << ", \"listName\" : \"" << m_listName << "\""; } if (!m_kvlistKeyName.empty()) { convert << ", \"keyName\" : \"" << m_kvlistKeyName << "\""; } if (!m_kvlistKeyDescription.empty()) { convert << ", \"keyDescription\" : \"" << m_kvlistKeyDescription << "\""; } if (!m_jsonSchema.empty()) { convert << ", \"schema\" : " << m_jsonSchema; } if (m_itemType == StringItem || m_itemType == EnumerationItem || m_itemType == BoolItem || m_itemType == BucketItem || m_itemType == ListItem || m_itemType == KVListItem) { convert << ", \"default\" : \"" << JSONescape(m_default) << "\" }"; } /** * NOTE: * These data types must be all escaped. 
* "default" items in the DefaultConfigCategory class are sent to * ConfigurationManager interface which requires string values only: * * examples: * we must use "100" not 100 * and for JSON * "{\"pipeline\":[\"scale\"]}" not {"pipeline":["scale"]} */ else if (m_itemType == JsonItem || m_itemType == NumberItem || m_itemType == DoubleItem || m_itemType == ScriptItem || m_itemType == CodeItem) { convert << ", \"default\" : \"" << JSONescape(m_default) << "\" }"; } return convert.str(); } /** * Parse BucketItem value in JSON dict format and return the key value pairs within that * * @param json JSON string representing the BucketItem value * @return Vector with pairs of found key/value string pairs in BucketItem value */ vector<pair<string,string>>* ConfigCategory::parseBucketItemValue(const string & json) { Document document; if (document.Parse(json.c_str()).HasParseError()) { Logger::getLogger()->error("parseBucketItemValue(): The provided JSON string has a parse error: %s", GetParseError_En(document.GetParseError())); return NULL; } vector<pair<string,string>> *vec = new vector<pair<string,string>>; for (const auto & m : document.GetObject()) vec->emplace_back(make_pair<string,string>(m.name.GetString(), m.value.GetString())); return vec; } // DefaultConfigCategory constructor DefaultConfigCategory::DefaultConfigCategory(const string& name, const string& json) : ConfigCategory::ConfigCategory(name, json) { } /** * Destructor for the default configuration category. Simply call the base class * destructor. 
*/ DefaultConfigCategory::~DefaultConfigCategory() { } /** * Return JSON string of all category components * of a DefaultConfigCategory class */ string DefaultConfigCategory::toJSON() const { ostringstream convert; convert << "{ "; convert << "\"key\" : \"" << JSONescape(m_name) << "\", "; convert << "\"description\" : \"" << JSONescape(m_description) << "\", \"value\" : "; // Add items convert << DefaultConfigCategory::itemsToJSON(); convert << " }"; return convert.str(); } /** * Return DefaultConfigCategory "default" items only */ string DefaultConfigCategory::itemsToJSON() const { ostringstream convert; convert << "{"; for (auto it = m_items.cbegin(); it != m_items.cend(); it++) { convert << (*it)->defaultToJSON(); if (it + 1 != m_items.cend() ) { convert << ", "; } } convert << "}"; return convert.str(); } /** * Return JSON string of a category item * @param itemName The given item within current category * @return The JSON string version of itemName * If not found {} is returned */ string ConfigCategory::itemToJSON(const string& itemName) const { ostringstream convert; convert << "{"; for (auto it = m_items.cbegin(); it != m_items.cend(); it++) { if ((*it)->m_name.compare(itemName) == 0) { convert << (*it)->toJSON(); } } convert << "}"; return convert.str(); } /** * Configuration Category constructor * * @param name The name of the configuration category * @param json JSON content of the configuration category */ ConfigCategoryChange::ConfigCategoryChange(const string& json) { Document doc; doc.Parse(json.c_str()); if (doc.HasParseError()) { Logger::getLogger()->error("Configuration parse error in category change %s: %s at %d", json.c_str(), GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset()); throw new ConfigMalformed(); } if (!doc.HasMember("category")) { Logger::getLogger()->error("Configuration change is missing a category element '%s'", json.c_str()); throw new ConfigMalformed(); } if (doc.HasMember("parent_category")) { 
m_parent_name=doc["parent_category"].GetString(); } else { m_parent_name=""; } if (!doc.HasMember("items")) { Logger::getLogger()->error("Configuration change is missing an items element '%s'", json.c_str()); throw new ConfigMalformed(); } m_name = doc["category"].GetString(); const Value& items = doc["items"]; for (Value::ConstMemberIterator itr = items.MemberBegin(); itr != items.MemberEnd(); ++itr) { try { m_items.push_back(new CategoryItem(itr->name.GetString(), itr->value)); } catch (exception* e) { Logger::getLogger()->error("Configuration parse error in category %s item '%s', %s: %s", m_name.c_str(), itr->name.GetString(), json.c_str(), e->what()); delete e; throw ConfigMalformed(); } catch (...) { throw; } } } ================================================ FILE: C/common/cryptography_utils.cpp ================================================ /* * Fledge utilities functions for generating cryptographic hash * * Copyright (c) 2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Devki Nandan Ghildiyal */ #include <sstream> #include <iomanip> #include "cryptography_utils.h" /* * * Generates SHA256 Hash * * @param input JSON string for the reading * @return SHA256 Hash String */ std::string compute_sha256(const std::string& input) { #ifdef OPENSSL_VERSION_NUMBER #if OPENSSL_VERSION_NUMBER >= 0x30000000L // Code for OpenSSL 3.0.x unsigned char digest[SHA256_DIGEST_LENGTH]; EVP_MD_CTX *ctx = EVP_MD_CTX_new(); if (!ctx) { throw std::runtime_error("Failed to create OpenSSL EVP_MD_CTX"); } if (EVP_DigestInit_ex(ctx, EVP_sha256(), nullptr) != 1 || EVP_DigestUpdate(ctx, input.data(), input.size()) != 1 || EVP_DigestFinal_ex(ctx, digest, nullptr) != 1) { EVP_MD_CTX_free(ctx); throw std::runtime_error("OpenSSL SHA-256 computation failed"); } EVP_MD_CTX_free(ctx); std::ostringstream ss; for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) { ss << std::setw(2) << std::setfill('0') << std::hex << (int)digest[i]; } return ss.str(); #else // Code for 
OpenSSL 1.1.x unsigned char digest[SHA256_DIGEST_LENGTH]; SHA256_CTX sha256Context; SHA256_Init(&sha256Context); SHA256_Update(&sha256Context, input.c_str(), input.length()); SHA256_Final(digest, &sha256Context); std::ostringstream ss; for (int i = 0; i < SHA256_DIGEST_LENGTH; i++) { ss << std::setw(2) << std::setfill('0') << std::hex << (int)digest[i]; } return ss.str(); #endif #endif } std::string compute_md5(const std::string& input) { #ifdef OPENSSL_VERSION_NUMBER #if OPENSSL_VERSION_NUMBER >= 0x30000000L // Code for OpenSSL 3.0.x unsigned char digest[MD5_DIGEST_LENGTH]; EVP_MD_CTX *ctx = EVP_MD_CTX_new(); if (!ctx) { throw std::runtime_error("Failed to create OpenSSL EVP_MD_CTX"); } if (EVP_DigestInit_ex(ctx, EVP_md5(), nullptr) != 1 || EVP_DigestUpdate(ctx, input.data(), input.size()) != 1 || EVP_DigestFinal_ex(ctx, digest, nullptr) != 1) { EVP_MD_CTX_free(ctx); throw std::runtime_error("OpenSSL MD5 computation failed"); } EVP_MD_CTX_free(ctx); std::ostringstream ss; for (int i = 0; i < MD5_DIGEST_LENGTH; i++) { ss << std::setw(2) << std::setfill('0') << std::hex << (int)digest[i]; } return ss.str(); #else // Code for OpenSSL 1.1.x unsigned char digest[MD5_DIGEST_LENGTH]; MD5_CTX md5Context; MD5_Init(&md5Context); MD5_Update(&md5Context, input.c_str(), input.length()); MD5_Final(digest, &md5Context); std::ostringstream ss; for (int i = 0; i < MD5_DIGEST_LENGTH; i++) { ss << std::setw(2) << std::setfill('0') << std::hex << (int)digest[i]; } return ss.str(); #endif #endif } ================================================ FILE: C/common/databuffer.cpp ================================================ /* * Fledge * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <databuffer.h> #include <exception> #include <stdexcept> #include <stdlib.h> #include <string.h> using namespace std; /** * Buffer constructor * * @param itemSize The size of each item in the buffer * @param len The length of the buffer, 
i.e. how many items can it hold */ DataBuffer::DataBuffer(size_t itemSize, size_t len) : m_itemSize(itemSize), m_len(len) { m_data = calloc(len, itemSize); if (m_data == NULL) throw runtime_error("Insufficient memory to create buffer"); } /** * DataBuffer destructor */ DataBuffer::~DataBuffer() { if (m_data) free(m_data); m_data = NULL; } /** * DataBuffer copy constructor * * @param rhs DataBuffer to copy */ DataBuffer::DataBuffer(const DataBuffer& rhs) { m_itemSize = rhs.m_itemSize; m_len = rhs.m_len; m_data = calloc(m_len, m_itemSize); if (m_data) memcpy(m_data, rhs.m_data, m_itemSize * m_len); else throw runtime_error("Insufficient memory to copy databuffer"); } /** * Populate the contents of a DataBuffer * * @param src Source of the data * @param len Number of bytes in the source to copy */ void DataBuffer::populate(void *src, int len) { size_t toCopy = min((size_t)len, m_len * m_itemSize); memcpy(m_data, src, toCopy); } ================================================ FILE: C/common/datapoint.cpp ================================================ /* * Fledge * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <string> #include <sstream> #include <iomanip> #include <cfloat> #include <vector> #include <logger.h> #include <datapoint.h> #include <exception> #include <base64databuffer.h> #include <base64dpimage.h> /** * Return the value as a string * * @return String representing the DatapointValue object */ std::string DatapointValue::toString() const { std::ostringstream ss; switch (m_type) { case T_INTEGER: ss << m_value.i; return ss.str(); case T_FLOAT: { char tmpBuffer[100]; std::string s; snprintf(tmpBuffer, sizeof(tmpBuffer), "%.10f", m_value.f); s= tmpBuffer; // remove trailing 0's if (s[s.size()-1]== '0') { s.erase(s.find_last_not_of('0') + 1, std::string::npos); // add '0' i if (s[s.size()-1]== '.') s.append("0"); } return s; } case T_FLOAT_ARRAY: ss << "["; for (auto it = 
m_value.a->begin(); it != m_value.a->end(); ++it) { if (it != m_value.a->begin()) { ss << ", "; } ss << *it; } ss << "]"; return ss.str(); case T_DP_DICT: case T_DP_LIST: ss << ((m_type==T_DP_DICT)?'{':'['); for (auto it = m_value.dpa->begin(); // std::vector<Datapoint *>* dpa; it != m_value.dpa->end(); ++it) { if (it != m_value.dpa->begin()) { ss << ", "; } ss << ((m_type==T_DP_DICT)?(*it)->toJSONProperty():(*it)->getData().toString()); } ss << ((m_type==T_DP_DICT)?'}':']'); return ss.str(); case T_STRING: ss << "\""; ss << escape(*m_value.str); ss << "\""; return ss.str(); case T_DATABUFFER: ss << "\"__DATABUFFER:" << ((Base64DataBuffer *)m_value.dataBuffer)->encode() << "\""; return ss.str(); case T_IMAGE: ss << "\"__DPIMAGE:" << ((Base64DPImage *)m_value.image)->encode() << "\""; return ss.str(); case T_2D_FLOAT_ARRAY: { ss << "[ "; bool first = true; for (auto row : *(m_value.a2d)) { if (first) first = false; else ss << ", "; ss << "["; for (auto it = row->begin(); it != row->end(); ++it) { if (it != row->begin()) { ss << ", "; } ss << *it; } ss << "]"; } ss << " ]"; return ss.str(); } default: throw std::runtime_error("No string representation for datapoint type"); } } /** * Delete the DatapointValue along with possibly nested Datapoint objects */ void DatapointValue::deleteNestedDPV() { if (m_type == T_STRING) { delete m_value.str; m_value.str = NULL; } else if (m_type == T_FLOAT_ARRAY) { delete m_value.a; m_value.a = NULL; } else if (m_type == T_DATABUFFER) { delete m_value.dataBuffer; m_value.dataBuffer = NULL; } else if (m_type == T_IMAGE) { delete m_value.image; m_value.image = NULL; } else if (m_type == T_DP_DICT || m_type == T_DP_LIST) { if (m_value.dpa) { for (auto it = m_value.dpa->begin(); it != m_value.dpa->end(); ++it) { // Call DatapointValue destructor delete(*it); } // Remove vector pointer delete m_value.dpa; m_value.dpa = NULL; } } else if (m_type == T_2D_FLOAT_ARRAY) { for (auto it = m_value.a2d->begin(); it != m_value.a2d->end(); ++it) { 
delete(*it); } delete m_value.a2d; m_value.a2d = NULL; } } /** * DatapointValue class destructor */ DatapointValue::~DatapointValue() { // Remove memory allocated by datapoints // along with possibly nested Datapoint objects deleteNestedDPV(); } /** * Copy constructor */ DatapointValue::DatapointValue(const DatapointValue& obj) { m_type = obj.m_type; switch (m_type) { case T_STRING: m_value.str = new std::string(*(obj.m_value.str)); break; case T_FLOAT_ARRAY: m_value.a = new std::vector<double>(*(obj.m_value.a)); break; case T_DP_DICT: case T_DP_LIST: m_value.dpa = new std::vector<Datapoint*>(); for (auto it = obj.m_value.dpa->begin(); it != obj.m_value.dpa->end(); ++it) { Datapoint *d = *it; // Add new allocated datapoint to the vector // using copy constructor Datapoint *dpCopy = new Datapoint(*d); m_value.dpa->emplace_back(dpCopy); } break; case T_IMAGE: m_value.image = new DPImage(*(obj.m_value.image)); break; case T_DATABUFFER: m_value.dataBuffer = new DataBuffer(*(obj.m_value.dataBuffer)); break; case T_2D_FLOAT_ARRAY: m_value.a2d = new std::vector< std::vector<double>* >; for (auto row : *obj.m_value.a2d) { std::vector<double> *nrow = new std::vector<double>; for (auto& d : *row) { nrow->push_back(d); } m_value.a2d->push_back(nrow); } m_type = T_2D_FLOAT_ARRAY; break; default: m_value = obj.m_value; break; } } /** * Assignment Operator */ DatapointValue& DatapointValue::operator=(const DatapointValue& rhs) { if (m_type == T_STRING) { // Remove previous value delete m_value.str; } if (m_type == T_FLOAT_ARRAY) { // Remove previous value delete m_value.a; } if (m_type == T_DP_DICT || m_type == T_DP_LIST) { // Remove previous value delete m_value.dpa; } if (m_type == T_IMAGE) { delete m_value.image; } if (m_type == T_DATABUFFER) { delete m_value.dataBuffer; } if (m_type == T_2D_FLOAT_ARRAY) { delete m_value.a2d; } m_type = rhs.m_type; switch (m_type) { case T_STRING: m_value.str = new std::string(*(rhs.m_value.str)); break; case T_FLOAT_ARRAY: m_value.a = new 
std::vector<double>(*(rhs.m_value.a)); break; case T_DP_DICT: case T_DP_LIST: m_value.dpa = new std::vector<Datapoint*>(*(rhs.m_value.dpa)); break; case T_IMAGE: m_value.image = new DPImage(*(rhs.m_value.image)); break; case T_DATABUFFER: m_value.dataBuffer = new DataBuffer(*(rhs.m_value.dataBuffer)); break; case T_2D_FLOAT_ARRAY: m_value.a2d = new std::vector< std::vector<double>* >; for (auto row : *(rhs.m_value.a2d)) { std::vector<double> *nrow = new std::vector<double>; for (auto& d : *row) { nrow->push_back(d); } m_value.a2d->push_back(nrow); } m_type = T_2D_FLOAT_ARRAY; break; default: m_value = rhs.m_value; break; } return *this; } /** * Escape quotes etc to allow the string to be a property value within * a JSON document * * @param str The string to escape * @return The escaped string */ const std::string DatapointValue::escape(const std::string& str) const { std::string rval; int bscount = 0; for (size_t i = 0; i < str.length(); i++) { if (str[i] == '\\') { if (i + 1 < str.length() && (str[i + 1] == '"' || str[i + 1] == '\\' || str[i + 1] == '/'|| str[i-1] == '\\')) { rval += '\\'; } else { rval += "\\\\"; } bscount++; } else if (str[i] == '\"') { if ((bscount & 1) == 0) // not already escaped { rval += "\\"; // Add escape of " } rval += str[i]; bscount = 0; } else { rval += str[i]; bscount = 0; } } return rval; } /** * Parsing a Json string * * @param json : string json * @return vector of datapoints */ std::vector<Datapoint*> *Datapoint::parseJson(const std::string& json) { rapidjson::Document document; const auto& parseResult = document.Parse(json.c_str()); if (parseResult.HasParseError()) { Logger::getLogger()->fatal("Parsing error %d (%s).", parseResult.GetParseError(), json.c_str()); printf("Parsing error %d (%s).", parseResult.GetParseError(), json.c_str()); return nullptr; } if (!document.IsObject()) { return nullptr; } return recursiveJson(document); } /** * Recursive method to convert a JSON string to a datapoint * * @param document : object 
rapidjon * @return vector of datapoints */ std::vector<Datapoint*> *Datapoint::recursiveJson(const rapidjson::Value& document) { std::vector<Datapoint*>* p = new std::vector<Datapoint*>(); for (rapidjson::Value::ConstMemberIterator itr = document.MemberBegin(); itr != document.MemberEnd(); ++itr) { if (itr->value.IsObject()) { std::vector<Datapoint*> * vec = recursiveJson(itr->value); DatapointValue d(vec, true); p->push_back(new Datapoint(itr->name.GetString(), d)); } else if (itr->value.IsString()) { DatapointValue d(itr->value.GetString()); p->push_back(new Datapoint(itr->name.GetString(), d)); } else if (itr->value.IsDouble()) { DatapointValue d(itr->value.GetDouble()); p->push_back(new Datapoint(itr->name.GetString(), d)); } else if (itr->value.IsNumber() && itr->value.IsInt()) { DatapointValue d((long)itr->value.GetInt()); p->push_back(new Datapoint(itr->name.GetString(), d)); } else if (itr->value.IsNumber() && itr->value.IsUint()) { DatapointValue d((long)itr->value.GetUint()); p->push_back(new Datapoint(itr->name.GetString(), d)); } else if (itr->value.IsNumber() && itr->value.IsInt64()) { DatapointValue d((long)itr->value.GetInt64()); p->push_back(new Datapoint(itr->name.GetString(), d)); } else if (itr->value.IsNumber() && itr->value.IsUint64()) { DatapointValue d((long)itr->value.GetUint64()); p->push_back(new Datapoint(itr->name.GetString(), d)); } } return p; } ================================================ FILE: C/common/datapoint_utility.cpp ================================================ /* * Datapoint utility. 
* * Copyright (c) 2020, RTE (https://www.rte-france.com) * * Released under the Apache 2.0 Licence * * Author: Yannick Marchetaux * */ #include <datapoint_utility.h> #include <vector> using namespace std; /** * Search a dictionary from a key * * @param dict : parent dictionary * @param key : key to research * @return vector of datapoint otherwise null pointer */ DatapointUtility::Datapoints *DatapointUtility::findDictElement(Datapoints *dict, const string& key) { return findDictOrListElement(dict, key, DatapointValue::T_DP_DICT); } /** * Search a array from a key * * @param dict : parent dictionary * @param key : key to research * @return vector of datapoint otherwise null pointer */ DatapointUtility::Datapoints *DatapointUtility::findListElement(Datapoints *dict, const string& key) { return findDictOrListElement(dict, key, DatapointValue::T_DP_LIST); } /** * Search a list or dictionary from a key * * @param dict : parent dictionary * @param key : key to research * @param type : type of data searched * @return vector of datapoint otherwise null pointer */ DatapointUtility::Datapoints *DatapointUtility::findDictOrListElement(Datapoints *dict, const string& key, DatapointValue::dataTagType type) { Datapoint *dp = findDatapointElement(dict, key); if (dp == nullptr) { return nullptr; } DatapointValue& data = dp->getData(); if (data.getType() == type) { return data.getDpVec(); } return nullptr; } /** * Search a DatapointValue from a key * * @param dict : parent dictionary * @param key : key to research * @return corresponding datapointValue otherwise null pointer */ DatapointValue *DatapointUtility::findValueElement(Datapoints *dict, const string& key) { Datapoint *dp = findDatapointElement(dict, key); if (dp == nullptr) { return nullptr; } return &dp->getData(); } /** * Search a Datapoint from a key * * @param dict : parent dictionary * @param key : key to research * @return corresponding datapoint otherwise null pointer */ Datapoint 
*DatapointUtility::findDatapointElement(Datapoints *dict, const string& key) { if (dict == nullptr) { return nullptr; } for (Datapoint *dp : *dict) { if (dp->getName() == key) { return dp; } } return nullptr; } /** * Search a string from a key * * @param dict : parent dictionary * @param key : key to research * @return corresponding string otherwise empty string */ string DatapointUtility::findStringElement(Datapoints *dict, const string& key) { Datapoint *dp = findDatapointElement(dict, key); if (dp == nullptr) { return ""; } DatapointValue& data = dp->getData(); const DatapointValue::dataTagType dType(data.getType()); if (dType == DatapointValue::T_STRING) { return data.toStringValue(); } return ""; } /** * Method to delete and to free elements from a vector * * @param dps dict of values * @param key key of dict */ void DatapointUtility::deleteValue(Datapoints *dps, const string& key) { for (Datapoints::iterator it = dps->begin(); it != dps->end(); it++){ if ((*it)->getName() == key) { delete (*it); dps->erase(it); break; } } } /** * Generate default attribute integer on Datapoint * * @param dps dict of values * @param key key of dict * @param valueDefault value attribute of dict * @return pointer of the created datapoint */ Datapoint *DatapointUtility::createIntegerElement(Datapoints *dps, const string& key, long valueDefault) { deleteValue(dps, key); DatapointValue dv(valueDefault); Datapoint *dp = new Datapoint(key, dv); dps->push_back(dp); return dp; } /** * Generate default attribute string on Datapoint * * @param dps dict of values * @param key key of dict * @param valueDefault value attribute of dict * @return pointer of the created datapoint */ Datapoint *DatapointUtility::createStringElement(Datapoints *dps, const string& key, const string& valueDefault) { deleteValue(dps, key); DatapointValue dv(valueDefault); Datapoint *dp = new Datapoint(key, dv); dps->push_back(dp); return dp; } /** * Generate default attribute dict on Datapoint * * @param dps dict 
of values * @param key key of dict * @param dict if the element is a dictionary * @return pointer of the created datapoint */ Datapoint *DatapointUtility::createDictOrListElement(Datapoints* dps, const string& key, bool dict) { deleteValue(dps, key); Datapoints *newVec = new Datapoints; DatapointValue dv(newVec, dict); Datapoint *dp = new Datapoint(key, dv); dps->push_back(dp); return dp; } /** * Generate default attribute dict on Datapoint * * @param dps dict of values * @param key key of dict * @return pointer of the created datapoint */ Datapoint *DatapointUtility::createDictElement(Datapoints* dps, const string& key) { return createDictOrListElement(dps, key, true); } /** * Generate default attribute list on Datapoint * * @param dps dict of values * @param key key of dict * @return pointer of the created datapoint */ Datapoint *DatapointUtility::createListElement(Datapoints* dps, const string& key) { return createDictOrListElement(dps, key, false); } ================================================ FILE: C/common/file_utils.cpp ================================================ /* * Fledge utilities functions for handling files and directories * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Ray Verhoeff */ #include <stdio.h> #include <unistd.h> #include <fcntl.h> #include <ftw.h> #include <stdexcept> #include "file_utils.h" /** * Callback for Linux file walk routine 'nftw' * * @param filePath File full path * @param sb struct stat to hold file information * @param typeflag File type flag: FTW_F = file, FTW_D = directory * @param ftwbuf struct FTW to hold name offset and file depth * @return Zero if successful */ static int fileDeleteCallback(const char *filePath, const struct stat *sb, int typeflag, struct FTW *ftwbuf) { return remove(filePath); } /** * Copy a file * * @param to Full path of the destination file * @param from Full path of the source file * @return Zero if successful */ int copyFile(const char *to, 
const char *from) { int fd_to, fd_from; char buf[4096]; ssize_t nread; int saved_errno; fd_from = open(from, O_RDONLY); if (fd_from < 0) return -1; fd_to = open(to, O_WRONLY | O_CREAT | O_EXCL, 0666); if (fd_to < 0) goto out_error; while (nread = read(fd_from, buf, sizeof buf), nread > 0) { char *out_ptr = buf; ssize_t nwritten; do { nwritten = write(fd_to, out_ptr, nread); if (nwritten >= 0) { nread -= nwritten; out_ptr += nwritten; } else if (errno != EINTR) { goto out_error; } } while (nread > 0); } if (nread == 0) { if (close(fd_to) < 0) { fd_to = -1; goto out_error; } close(fd_from); /* Success! */ return 0; } out_error: saved_errno = errno; close(fd_from); if (fd_to >= 0) close(fd_to); errno = saved_errno; return -1; } /** * Create a single directory. * This routine cannot create a directory tree from a full path. * This routine throws a std::runtime_error exception if the directory cannot be created. * * @param directoryName Full path of the directory to create */ void createDirectory(const std::string &directoryName) { const char *path = directoryName.c_str(); struct stat sb; if (stat(path, &sb) == 0) { if (sb.st_mode & S_IFDIR) { return; // Directory exists } else { std::string exceptionMessage = "Path exists but is not a directory: " + directoryName; throw std::runtime_error(exceptionMessage.c_str()); } } else { int retcode; if ((retcode = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH)) != 0) { std::string exceptionMessage = "Unable to create directory " + directoryName + ": error: " + std::to_string(retcode); throw std::runtime_error(exceptionMessage.c_str()); } } } /** * Remove a directory with all subdirectories and files * * @param path Full path of the directory * @return Zero if successful */ int removeDirectory(const char *path) { return nftw(path, fileDeleteCallback, 64, FTW_DEPTH | FTW_PHYS); } ================================================ FILE: C/common/filter_pipeline.cpp ================================================ /* * 
Fledge plugin filter class * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <filter_pipeline.h> #include <config_handler.h> #include <service_handler.h> #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #define JSON_CONFIG_FILTER_ELEM "filter" #define JSON_CONFIG_PIPELINE_ELEM "pipeline" using namespace std; /** * FilterPipeline class constructor * * This class abstracts the filter pipeline interface * * @param mgtClient Management client handle * @param storage Storage client handle * @param serviceName Name of the service to which this pipeline applies */ FilterPipeline::FilterPipeline(ManagementClient* mgtClient, StorageClient& storage, string serviceName) : mgtClient(mgtClient), storage(storage), serviceName(serviceName), m_ready(false), m_shutdown(false) { } /** * FilterPipeline destructor */ FilterPipeline::~FilterPipeline() { } /** * Load the specified filter plugin * * @param filterName The filter plugin to load * @return Plugin handle on success, NULL otherwise * */ PLUGIN_HANDLE FilterPipeline::loadFilterPlugin(const string& filterName) { if (filterName.empty()) { Logger::getLogger()->error("Unable to fetch filter plugin '%s' from configuration.", filterName.c_str()); // Failure return NULL; } Logger::getLogger()->info("Loading filter plugin '%s'.", filterName.c_str()); PluginManager* manager = PluginManager::getInstance(); PLUGIN_HANDLE handle; if ((handle = manager->loadPlugin(filterName, PLUGIN_TYPE_FILTER)) != NULL) { // Suceess Logger::getLogger()->info("Loaded filter plugin '%s'.", filterName.c_str()); } return handle; } /** * Load all filter plugins in the pipeline * * @param categoryName Configuration category name * @return True if filters are loaded (or no filters at all) * False otherwise */ bool FilterPipeline::loadFilters(const string& categoryName) { vector<string> children; // The Child categories of 'Filters' try { // Get the category with values 
and defaults ConfigCategory config = mgtClient->getCategory(categoryName); string filter = config.getValue(JSON_CONFIG_FILTER_ELEM); Logger::getLogger()->info("FilterPipeline::loadFilters(): categoryName=%s, filters=%s", categoryName.c_str(), filter.c_str()); if (!filter.empty()) { std::vector<pair<string, PLUGIN_HANDLE>> filterInfo; // Remove \" and leading/trailing " // TODO: improve/change this filter.erase(remove(filter.begin(), filter.end(), '\\' ), filter.end()); size_t i; while (! (i = filter.find('"')) || (i = filter.rfind('"')) == static_cast<unsigned char>(filter.size() - 1)) { filter.erase(i, 1); } //Parse JSON object for filters Document theFilters; theFilters.Parse(filter.c_str()); // The "pipeline" property must be an array if (theFilters.HasParseError() || !theFilters.HasMember(JSON_CONFIG_PIPELINE_ELEM) || !theFilters[JSON_CONFIG_PIPELINE_ELEM].IsArray()) { string errMsg("loadFilters: can not parse JSON '"); errMsg += string(JSON_CONFIG_FILTER_ELEM) + "' property"; Logger::getLogger()->fatal(errMsg.c_str()); throw runtime_error(errMsg); } else { const Value& filterList = theFilters[JSON_CONFIG_PIPELINE_ELEM]; if (!filterList.Size()) { // Empty array, just return true return true; } // Prepare printable list of filters StringBuffer buffer; Writer<StringBuffer> writer(buffer); filterList.Accept(writer); string printableList(buffer.GetString()); string logMsg("loadFilters: found filter(s) "); logMsg += printableList + " for plugin '"; logMsg += categoryName + "'"; Logger::getLogger()->info(logMsg.c_str()); loadPipeline(filterList, m_filters); // We have kept filter default config in the filterInfo map // Handle configuration for each filter for (auto& itr : m_filters) { itr->setupConfiguration(mgtClient, children); } } } m_pipeline = filter; /* * Put all the new catregories in the Filter category parent * Create an empty South category if one doesn't exist */ string parentName = categoryName + " Filters"; DefaultConfigCategory filterConfig(parentName, 
string("{}")); filterConfig.setDescription("Filters for " + categoryName); mgtClient->addCategory(filterConfig, true); mgtClient->addChildCategories(parentName, children); vector<string> children1; children1.push_back(parentName); mgtClient->addChildCategories(categoryName, children1); return true; } catch (ConfigItemNotFound* e) { delete e; Logger::getLogger()->info("loadFilters: no filters configured for '" + categoryName + "'"); return true; } catch (exception& e) { Logger::getLogger()->fatal("loadFilters: failed to handle '" + categoryName + "' filters."); return false; } catch (...) { Logger::getLogger()->fatal("loadFilters: generic exception while loading '" + categoryName + "' filters."); return false; } } void FilterPipeline::loadPipeline(const Value& filterList, vector<PipelineElement *>& pipeline) { // Try loading all filter plugins: abort on any error for (Value::ConstValueIterator itr = filterList.Begin(); itr != filterList.End(); ++itr) { if (itr->IsString()) { // Get "plugin" item from filterCategoryName string filterCategoryName = itr->GetString(); Logger::getLogger()->info("Creating pipeline filter %s", filterCategoryName.c_str()); try { ConfigCategory filterDetails = mgtClient->getCategory(filterCategoryName); PipelineFilter *element = new PipelineFilter(filterCategoryName, filterDetails); element->setServiceName(serviceName); element->setStorage(&storage); pipeline.emplace_back(element); } catch (exception& e) { Logger::getLogger()->error("Failed to create filter %s: %s", filterCategoryName.c_str(), e.what()); } catch (exception *e) { Logger::getLogger()->error("Failed to create filter %s: %s", filterCategoryName.c_str(), e->what()); } } else if (itr->IsArray()) { // Sub pipeline Logger::getLogger()->info("Creating pipeline branch"); PipelineBranch *element = new PipelineBranch(this); loadPipeline(*itr, element->getBranchElements()); pipeline.emplace_back(element); } else if (itr->IsObject()) { // An object, probably the write destination 
Logger::getLogger()->warn("This version of Fledge does not support pipelines with different destinations. The destination will be ignored and the data written to the default storage service."); } else { Logger::getLogger()->error("Unexpected object in pipeline definition, ignoring"); } } // End the pipeline with a writer element that sends data to the // ingest of the storage system PipelineWriter *element = new PipelineWriter(); pipeline.emplace_back(element); } /** * Set the filter pipeline * * This method calls the method "plugin_init" for all loadad filters. * Up-to-date filter configurations and Ingest filtering methods * are passed to "plugin_init" * * @param passToOnwardFilter Ptr to function that passes data to next filter * @param useFilteredData Ptr to function that gets final filtered data * @param ingest The ingest class handle * @return True on success, * False otherwise. * @thown Any caught exception */ bool FilterPipeline::setupFiltersPipeline(void *passToOnwardFilter, void *useFilteredData, void *ingest) { bool initErrors = false; string errMsg = "'plugin_init' failed for filter '"; for (auto it = m_filters.begin(); it != m_filters.end(); ++it) { try { if ((*it)->isBranch()) { Logger::getLogger()->info("Set branch functions"); PipelineBranch *branch = (PipelineBranch *)(*it); branch->setFunctions(passToOnwardFilter, useFilteredData, ingest); } Logger::getLogger()->info("Setup element %s", (*it)->getName().c_str()); (*it)->setup(mgtClient, ingest, m_filterCategories); // Iterate the load filters set in the Ingest class m_filters member if ((it + 1) != m_filters.end()) { (*it)->setNext(*(it + 1)); // Set next filter pointer as OUTPUT_HANDLE try { if (!(*it)->init((OUTPUT_HANDLE *)(*(it + 1)), filterReadingSetFn(passToOnwardFilter))) { errMsg += (*it)->getName() + "'"; initErrors = true; break; } } catch (exception& e) { Logger::getLogger()->error("Unable to initialise plugin %s, %s", (*it)->getName().c_str(), e.what()); initErrors = true; break; } } 
else { // Set the Ingest class pointer as OUTPUT_HANDLE try { if (!(*it)->init((OUTPUT_HANDLE *)(ingest), filterReadingSetFn(useFilteredData))) { errMsg += (*it)->getName() + "'"; initErrors = true; break; } } catch (exception& e) { Logger::getLogger()->error("Unable to initialise plugin %s, %s", (*it)->getName().c_str(), e.what()); initErrors = true; break; } } } // TODO catch specific exceptions catch (...) { throw; } } if (initErrors) { // Failure Logger::getLogger()->fatal("Failed to create pipeline, %s", errMsg.c_str()); return false; } // Set filter pipeline is ready for data ingest m_ready = true; // Set the service handler for the pipeline m_serviceHandler = (ServiceHandler *)ingest; //Success return true; } /** * Cleanup all the loaded filters * * Call "plugin_shutdown" method and free the FilterPlugin object * * @param categoryName Configuration category name * */ void FilterPipeline::cleanupFilters(const string& categoryName) { // Shutdown filters - do this down the pipeline, starting // from the first filter in the pipeline. This allows a filter // to asynchronously send data in the shutdown call to the // next element in the pipeline since that next element has // not yet been asked to shutdown. // // This is not behaviour that is encouraged or designed, but a // small number of Python filters have implemented sending data // during shutdown, hence the need to ensure that data has // somewhere to go. for (auto it = m_filters.begin(); it != m_filters.end(); ++it) { PipelineElement *element = *it; ConfigHandler *configHandler = ConfigHandler::getInstance(mgtClient); element->shutdown(m_serviceHandler, configHandler); } // Delete filters, in reverse order for (auto it = m_filters.rbegin(); it != m_filters.rend(); ++it) { PipelineElement *element = *it; // Free filter delete element; } } /** * Configuration change for one of the filters. Lookup the category name and * find the plugin to call. 
Call the reconfigure method of that plugin with * the new configuration. * * @param category The name of the configuration category * @param newConfig The new category contents */ void FilterPipeline::configChange(const string& category, const string& newConfig) { auto it = m_filterCategories.find(category); if (it != m_filterCategories.end()) { it->second->reconfigure(newConfig); } } /** * Called when we pass the data into the pipeline. Set the * number of active branches to 1 */ void FilterPipeline::execute() { unique_lock<mutex> lck(m_actives); m_activeBranches = 1; } /** * Wait for all active branches of the pipeline to complete */ void FilterPipeline::awaitCompletion() { unique_lock<mutex> lck(m_actives); while (m_activeBranches > 0) { m_branchActivations.wait(lck); } } /** * A new branch has started in the pipeline */ void FilterPipeline::startBranch() { unique_lock<mutex> lck(m_actives); m_activeBranches++; } /** * A branch in the pipeline has completed */ void FilterPipeline::completeBranch() { unique_lock<mutex> lck(m_actives); m_activeBranches--; if (m_activeBranches == 0) { m_branchActivations.notify_all(); } } /** * Attach the debugger to the pipeline elements * * @return bool True if the pipeline was attached */ bool FilterPipeline::attachDebugger() { bool rval = attachDebugger(m_filters); setDebuggerBuffer(1); return rval; } /** * Attach the debugger to the pipeline elements * * @param pipeline The pipeline (or branch) to attach the debugger * @return bool True if the debugger was attached */ bool FilterPipeline::attachDebugger(const vector<PipelineElement *>& pipeline) { bool ret = true; if (pipeline.size() == 0) { // Makes no sense to attach the debugger to an empty pipeline return false; } for (auto& elem : pipeline) { if (!elem->attachDebugger()) { ret = false; break; } if (elem->isBranch()) { PipelineBranch *branch = (PipelineBranch *)elem; if (!attachDebugger(branch->getBranchElements())) { ret = false; break; } } } if (!ret) { // Detach any 
partially attached pipeline detachDebugger(pipeline); } return ret; } /** * Detach the debugger from the pipeline elements */ void FilterPipeline::detachDebugger() { detachDebugger(m_filters); } /** * Detach the debugger from the pipeline elements * * @param pipeline The pipeline or branch to detach the debugger from */ void FilterPipeline::detachDebugger(const vector<PipelineElement *>& pipeline) { for (auto& elem : pipeline) { elem->detachDebugger(); if (elem->isBranch()) { PipelineBranch *branch = (PipelineBranch *)elem; detachDebugger(branch->getBranchElements()); } } } /** * Set the debugger buffer size to the pipeline elements * * @param size The request number of readings to buffer */ void FilterPipeline::setDebuggerBuffer(unsigned int size) { setDebuggerBuffer(m_filters, size); } /** * Set the debugger buffer size to the pipeline elements * * @param pipeline The pipeline or branch to set the buffer size for * @param size The desired number of readings to buffer */ void FilterPipeline::setDebuggerBuffer(const vector<PipelineElement *>& pipeline, unsigned int size) { for (auto& elem : pipeline) { elem->setDebuggerBuffer(size); if (elem->isBranch()) { PipelineBranch *branch = (PipelineBranch *)elem; setDebuggerBuffer(branch->getBranchElements(), size); } } } /** * Get the debugger buffer contents for all the pipeline elements * * @return string JSON document with all the buffer contents */ string FilterPipeline::getDebuggerBuffer() { string rval = "{ \"data\" : ["; rval += getDebuggerBuffer(m_filters); rval += "]}"; return rval; } /** * Get the debugger buffer contents for all the pipeline elements * * @param pipeline The pipeline to fetch the buffered data from * @return string JSON document with all the buffer contents */ string FilterPipeline::getDebuggerBuffer(const vector<PipelineElement *>& pipeline) { string rval; for (auto& elem : pipeline) { vector<shared_ptr<Reading>> buf = elem->getDebuggerBuffer(); rval += "{ \"name\" : \""; rval += 
elem->getName(); rval += "\", \"readings\" : [ "; rval += readingsToJSON(buf); rval += "] }"; if (elem->getNext()) rval += ","; if (elem->isBranch()) { PipelineBranch *branch = (PipelineBranch *)elem; rval += "[ "; rval += getDebuggerBuffer(branch->getBranchElements()); rval += "], "; } } return rval; } /** * Get the debugger buffer contents for all the pipeline elements * * @param name The name of the filter element we return the buffer from * @return string JSON document with all the buffer contents */ string FilterPipeline::getDebuggerBuffer(const string& name) { string rval; for (auto& elem : m_filters) { if (elem->getName().compare(name) == 0) { vector<shared_ptr<Reading>> buf = elem->getDebuggerBuffer(); rval += "{ \"name\" : \""; rval += name; rval += "\", "; rval += readingsToJSON(buf); rval += "}"; } } return rval; } /** * Convert a vector of readings into JSON that we can use to return * the buffered data held at each stage within the filter pipeline. * * @param readings A vector of shared pointers to readings * @return string A JSON structure containing the pipeline buffers */ string FilterPipeline::readingsToJSON(vector<shared_ptr<Reading>> readings) { string rval; for (int j = 0; j < readings.size(); j++) { shared_ptr<Reading> reading = readings[j]; rval += reading->toJSON(); if (j < readings.size() - 1) rval += ","; } return rval; } /** * Replay the data in the first saved buffer to the filter pipeline * * @return bool Returns true if data has been replayed, otehrwise retuns false */ bool FilterPipeline::replayDebugger() { ReadingSet *replay; vector<Reading *> *readings = new vector<Reading *>; PipelineElement *first; if (m_filters.size() > 0) { first = m_filters[0]; } else { // No filters to replay to return false; } if (first) { vector<shared_ptr<Reading>> buf = first->getDebuggerBuffer(); for (int i = 0; i < buf.size(); i++) { if (buf[i]) { readings->emplace_back(new Reading(*buf[i].get())); } } replay = new ReadingSet(readings); if (replay) { 
first->ingest(replay); } else { return false; } } else { return false; } return true; } ================================================ FILE: C/common/filter_plugin.cpp ================================================ /* * Fledge plugin filter class * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <filter_plugin.h> #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #define JSON_CONFIG_FILTER_ELEM "filter" #define JSON_CONFIG_PIPELINE_ELEM "pipeline" using namespace std; /** * FilterPlugin class constructor * * This class wraps the filter plugin C interface and creates * set of function pointers that resolve to the loaded plugin and * enclose in the class. * * @param name The filter name * @param handle The loaded plugin handle * * Set the function pointers to Filter Plugin C API */ FilterPlugin::FilterPlugin(const std::string& name, PLUGIN_HANDLE handle) : Plugin(handle), m_name(name) { // Setup the function pointers to the plugin pluginInit = (PLUGIN_HANDLE (*)(const ConfigCategory *, OUTPUT_HANDLE *, OUTPUT_STREAM output)) manager->resolveSymbol(handle, "plugin_init"); pluginShutdownPtr = (void (*)(PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_shutdown"); pluginIngestPtr = (void (*)(PLUGIN_HANDLE, READINGSET *)) manager->resolveSymbol(handle, "plugin_ingest"); pluginShutdownDataPtr = (string (*)(const PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_shutdown"); pluginStartDataPtr = (void (*)(const PLUGIN_HANDLE, const string& storedData)) manager->resolveSymbol(handle, "plugin_start"); pluginStartPtr = (void (*)(const PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_start"); pluginReconfigurePtr = (void (*)(PLUGIN_HANDLE, const string&)) manager->resolveSymbol(handle, "plugin_reconfigure"); // Set m_instance default value m_instance = NULL; // Persist data initialised m_plugin_data = NULL; } /** * FilterPlugin destructor */ 
FilterPlugin::~FilterPlugin() { delete m_plugin_data; } /** * Call the loaded plugin "plugin_init" method * * @param config The filter configuration * @param outHandle The output_handled passed with * filtered data to OUTPUT_STREAM function * @param outputFunc The output_stream function pointer * the filter uses to pass data out * @return The PLUGIN_HANDLE object */ PLUGIN_HANDLE FilterPlugin::init(const ConfigCategory& config, OUTPUT_HANDLE *outHandle, OUTPUT_STREAM outputFunc) { m_instance = this->pluginInit(&config, outHandle, outputFunc); return (m_instance ? &m_instance : NULL); } /** * Call the loaded plugin "plugin_shutdown" method */ void FilterPlugin::shutdown() { // Check if m_instance has been set // and function pointer exists if (m_instance && this->pluginShutdownPtr) { return this->pluginShutdownPtr(m_instance); } } /** * Call the loaded plugin "plugin_shutdown" method * returning plugind data (as string) * * @return Plugin data as JSON string (to be saved into strage layer) */ string FilterPlugin::shutdownSaveData() { string ret(""); // Check if m_instance has been set // and function pointer exists if (m_instance && this->pluginShutdownDataPtr) { ret = this->pluginShutdownDataPtr(m_instance); } return ret; } /** * Call plugin_start */ void FilterPlugin::start() { if (pluginStartPtr) { return this->pluginStartPtr(m_instance); } } /** * Call plugin_reconfigure method * * @param configuration The new filter configuration */ void FilterPlugin::reconfigure(const string& configuration) { if (pluginReconfigurePtr) { return this->pluginReconfigurePtr(m_instance, configuration); } } /** * Call plugin_start passing plugin data. 
* * @param storedData Plugin data to pass (from storage layer) */ void FilterPlugin::startData(const string& storedData) { // Check pluginStartData function pointer exists if (this->pluginStartDataPtr) { this->pluginStartDataPtr(m_instance, storedData); } } /** * Call the loaded plugin "plugin_ingest" method * * This call ingest the readings through the filters chain * * @param readings The reading set to ingest */ void FilterPlugin::ingest(READINGSET* readings) { if (this->pluginIngestPtr) { return this->pluginIngestPtr(m_instance, readings); } } ================================================ FILE: C/common/form_data.cpp ================================================ /* * Fledge utilities functions for handling HTTP form data upload * with multipart data * * Copyright (c) 2022 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <form_data.h> #include <errno.h> using namespace std; using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; // Class constructor with HTTP request object FormData::FormData(shared_ptr<HttpServer::Request> request) { // Boundary in the content has two additional '-' chars m_boundary = "--"; // Get Content-Length from input header, if not found use request size auto header_it = request->header.find("Content-Length"); if (header_it != request->header.end()) { m_size = std::stoull(header_it->second); } else { m_size = request->content.size(); } // Get "Content-Type" which has content like: // Content-Type: multipart/form-data; boundary=------------------------XYZ header_it = request->header.find("Content-Type"); if (header_it != request->header.end()) { // Fetch multipart/form-data and boundary auto fileData = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse(header_it->second.c_str()); for (auto it = fileData.begin(); it != fileData.end(); ++it) { if (it->first == "boundary") { m_boundary += it->second.c_str(); } } } // Get row data (const) from client request m_buffer 
= request->content.data(); } /** * Skip a \r\n sequence * * @param b Current buffer pointer * @return Pointer after \r\n sequence */ uint8_t *FormData::skipSeparator(uint8_t *b) { if ((b + 1) != NULL && (*b == CR && *(b + 1) == LF)) { b += 2; } return b; } /** * Skip a double \r\n sequence * * @param b Current buffer pointer * @return Pointer after the double \r\n sequence */ uint8_t *FormData::skipDoubleSeparator(uint8_t *b) { // Look for \r\n const uint8_t* ptr_end = m_buffer + m_size; for (; b < ptr_end; b++) { if ((b + 1) != NULL && (*b == CR && *(b + 1) == LF)) { break; } } // Skip double \r\n sequence if (b && *b == CR && ((b + 1) && *(b + 1) == LF)) { b += 2; if (b && *b == CR && ((b + 1) && *(b + 1) == LF)) { b += 2; } } return b; } /** * Get end of content block, which can be binary data * * @param b Current buffer pointer * @return Pointer after the \r\n sequence + boundary */ uint8_t *FormData::getContentEnd(uint8_t *b) { if (!b) { return NULL; } // Check content bytes // Look for boundary after \r\n as content end uint8_t *endOfContent = NULL; const uint8_t* ptr_end = m_buffer + m_size; for (; b < ptr_end; b++) { // Found \r\n if ((b + 2) != NULL && (*b == CR && *(b + 1) == LF)) { endOfContent = (uint8_t *)strstr((char *)(b + 2), m_boundary.c_str()); if (endOfContent) { // Found boundary: content ends here break; } } } // Boundary found if (endOfContent && (endOfContent - 2)) { // Remove \r\n from end location endOfContent -= 2; } return endOfContent; } /** * Get given field name in the data buffer * * @param buffer Current buffer pointer * @param field The field name to find * @return Pointer to filed value * if found or NULL otherwise */ uint8_t *FormData::findDataFormField(uint8_t* buffer, const string& field) { // Find first Content-Disposition: field uint8_t* b = buffer; uint8_t* ptr = b; const uint8_t* ptr_end = m_buffer + m_size; string name = "\"" + field + "\""; string find = "form-data; name=" + name; bool found = false; while (ptr < ptr_end) 
{ // Look for boundary in content data char *boundaryEnd = strstr((char *)ptr, m_boundary.c_str()); if (boundaryEnd == NULL) { // No boundary, return NULL return NULL; } // Point to end of boundary ptr += m_boundary.length(); // Skip single \r\n b = this->skipSeparator(ptr); ptr = (uint8_t *)strstr((char *)b, "Content-Disposition:"); if (ptr == NULL) { break; } b = ptr + strlen("Content-Disposition:"); // Look for "form-data; " and "name=" as per input field ptr = (uint8_t *)strstr((char *)b, find.c_str()); // Given field name found ? if (ptr != NULL) { // Point to the end of mathed string ptr += find.length(); found = true; break; } else { // Field name not found: look for next boundary after \r\n for (; b < ptr_end; b++) { // Look for \r\n if ((b + 2) != NULL && (*b == CR && *(b + 1) == LF)) { if (strstr((char *)(b + 2), m_boundary.c_str()) != NULL) { // Look for boundary uint8_t *foundBoundary = (uint8_t *)strstr((char *)(b + 2), m_boundary.c_str()); if (foundBoundary) { b = foundBoundary; break; } } } } ptr = b; } } return (found ? ptr : NULL); } /** * Fetch content data uploaded without file, example * curl -v -v -v --output - -X POST -F 'attributes={"name": "B1", "type": "model"}' 127.0.0.1:43605/fledge/south/uploadA * * @param field The field name to fetch * @param data The value reference to fill on success */ void FormData::getUploadedData(const string &field, FieldValue& data) { // Point to buffer start uint8_t* b = (uint8_t *)m_buffer; // Get field name if it exists uint8_t* ptr = this->findDataFormField(b, field); if (ptr == NULL) { return; } b = ptr; uint8_t *endContent = this->getContentEnd(b); // Look for Content-Type, if existent within the // same part of the message, i.e. 
not beyond endContent ptr = (uint8_t *)strstr((char *)b, "Content-Type:"); if (ptr != NULL && ptr < endContent) { b = ptr + strlen("Content-Type:"); } // Check for \r\n sequence b = this->skipDoubleSeparator(b); // Content starts here uint8_t *startContent = b; // Find end of content if (endContent) { // Set output data // Buffer start and size data.start = startContent; data.size = (size_t)(endContent - startContent); } else { Logger::getLogger()->error("Closing boundary not found for data content"); } } /** * Fetch content data uploaded as file, example * curl -v -v -v --output - -X POST -F "bucket=@/some_path/file.bin" 127.0.0.1:43605/fledge/south/uploadA * * @param field The field name (filename type) to fetch * @param data The value reference to fill on success */ void FormData::getUploadedFile(const string& field, FieldValue& data) { // Point to buffer start uint8_t* b = (uint8_t *)m_buffer; // Get field name if it exists uint8_t* ptr = findDataFormField(b, field); if (ptr == NULL) { return; } b = ptr; // Check for ';' after name' if (*b != ';') { return; } // Look for filename ptr = (uint8_t *)strstr((char *)b, "filename="); if (ptr == NULL) { return; } b = ptr + strlen("filename="); // Look for Content-Type ptr = (uint8_t *)strstr((char *)b, "Content-Type:"); if (ptr == NULL) { return; } // Get filename string fileName; if (*(ptr - 2) == CR && (*(ptr - 1) == LF)) { size_t fNameSize = (ptr - 2) - b; // Skip leading an trailing '"' if (*b == '"') { // Filename starts after '"' b++; // Size -i 1 fNameSize--; } if (*(ptr - 2 - 1) == '"') { // Size - 1 fNameSize--; } // Set filename as in uploaded content // Caller might use this or select another name // while saving the content into a file fileName.assign((char *)b, fNameSize); } b = ptr + strlen("Content-Type:"); // Check for \r\n sequence b = this->skipDoubleSeparator(b); // File content starts here uint8_t *startContent = b; // Find end of content uint8_t *endContent = this->getContentEnd(b); if 
(endContent) { // Set output data // Buffer start and size data.start = startContent; data.size = (size_t)(endContent - startContent); // Set filename data.filename = fileName; } else { Logger::getLogger()->error("Closing boundary not found for file content"); } } /** * Save the uploaded file * * @param v The Field value data * @return Returns true if the file was succesfully saved */ bool FormData::saveFile(FormData::FieldValue& v, const string& fileName) { Logger::getLogger()->debug("Uploaded filename is '%s'", v.filename.c_str()); // v.filename holds the file name as per upload content Logger::getLogger()->debug("Saving uploaded file as '%s', size is %ld bytes", fileName.c_str(), v.size); // Create file int fd = open(fileName.c_str(), O_RDWR | O_CREAT | O_TRUNC, (mode_t)0644); if (fd == -1) { // An error occurred char errBuf[128]; char *e = strerror_r(errno, errBuf, sizeof(errBuf)); Logger::getLogger()->error("Error while creating filename '%s': %s", fileName.c_str(), e); return false; } // Write file from v.start, v.size bytes if (write(fd, (const void *)v.start, v.size) == -1) { // An error occurred char errBuf[128]; char *e = strerror_r(errno, errBuf, sizeof(errBuf)); Logger::getLogger()->error("Error while writing to file '%s': %s", fileName.c_str(), e); close(fd); return false; } // Close file close(fd); return true; } ================================================ FILE: C/common/image.cpp ================================================ /* * Fledge DPImage class * * Copyright (c) 2020 Dianomic System * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <dpimage.h> #include <logger.h> #include <string.h> #include <exception> #include <stdexcept> using namespace std; /** * DPImage constructor * * @param width The image width * @param height The image height * @param depth The image depth * @param data The actual image data */ DPImage::DPImage(int width, int height, int depth, void *data) : m_width(width), m_height(height), 
m_depth(depth) { m_byteSize = width * height * (depth / 8); m_pixels = (void *)malloc(m_byteSize); if (m_pixels) { memcpy(m_pixels, data, m_byteSize); } else { throw runtime_error("Insufficient memory to store image"); } } /** * Copy constructor * * @param DPImage The image to copy */ DPImage::DPImage(const DPImage& rhs) { m_width = rhs.m_width; m_height = rhs.m_height; m_depth = rhs.m_depth; m_byteSize = m_width * m_height * (m_depth / 8); m_pixels = (void *)malloc(m_byteSize); if (m_pixels) { memcpy(m_pixels, rhs.m_pixels, m_byteSize); } else { throw runtime_error("Insufficient memory to store image"); } } /** * Assignment operator * @param rhs Righthand side of equals operator */ DPImage& DPImage::operator=(const DPImage& rhs) { // Free any old data if (m_pixels) free(m_pixels); m_width = rhs.m_width; m_height = rhs.m_height; m_depth = rhs.m_depth; m_byteSize = m_width * m_height * (m_depth / 8); m_pixels = (void *)malloc(m_byteSize); if (m_pixels) { memcpy(m_pixels, rhs.m_pixels, m_byteSize); } else { throw runtime_error("Insufficient memory to store image"); } return *this; } /** * Destructor for the image */ DPImage::~DPImage() { if (m_pixels) free(m_pixels); m_pixels = NULL; } ================================================ FILE: C/common/include/JSONPath.h ================================================ /* * Fledge RapaidJSON JSONPath search helper * * Copyright (c) 2020 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #ifndef _JSONPATH_H #define _JSONPATH_H #include <rapidjson/document.h> #include <string> #include <vector> #include <logger.h> /** * A simple implementation of a JSON Path search mechanism to use * alongside RapidJSON */ class JSONPath { public: JSONPath(const std::string& path); ~JSONPath(); rapidjson::Value *findNode(rapidjson::Value& root); private: class PathComponent { public: virtual rapidjson::Value *match(rapidjson::Value *node) = 0; }; class LiteralPathComponent : public PathComponent { 
public: LiteralPathComponent(std::string& name); rapidjson::Value *match(rapidjson::Value *node); private: std::string m_name; }; class IndexPathComponent : public PathComponent { public: IndexPathComponent(std::string& name, int index); rapidjson::Value *match(rapidjson::Value *node); private: std::string m_name; int m_index; }; class MatchPathComponent : public PathComponent { public: MatchPathComponent(std::string& name, std::string& property, std::string& value); rapidjson::Value *match(rapidjson::Value *node); private: std::string m_name; std::string m_property; std::string m_value; }; void parse(); std::string m_path; std::vector<PathComponent *> m_parsed; Logger *m_logger; }; #endif ================================================ FILE: C/common/include/acl.h ================================================ #ifndef _ACL_H #define _ACL_H /* * Fledge ACL management * * Copyright (c) 2022 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <string> #include <vector> /** * This class represents the ACL (Access Control List) * as JSON object fetched from Fledge Storage * * There are utility methods along with ACLReason class for changes handking */ class ACL { public: ACL() {}; ACL(const std::string &json); const std::string& getName() { return m_name; }; class KeyValueItem { public: KeyValueItem(const std::string& k, const std::string& v) : key(k), value(v) {}; std::string key; std::string value; }; class UrlItem { public: UrlItem(const std::string& url, const std::vector<KeyValueItem>& acl) : url(url), acl(acl) {}; std::string url; std::vector<KeyValueItem> acl; }; public: const std::vector<KeyValueItem>& getService() { return m_service; }; const std::vector<UrlItem>& getURL() { return m_url; }; private: std::string m_name; std::vector<KeyValueItem> m_service; std::vector<UrlItem> m_url; public: /** * This class represents the ACL security change request * * Parsed JSON should have string attributes 'reason' and 
'argument' */ class ACLReason { public: ACLReason(const std::string &reason); const std::string& getReason() { return m_reason; }; const std::string& getArgument() { return m_argument; }; private: std::string m_reason; std::string m_argument; }; }; /** * Custom exception ACLMalformed */ class ACLMalformed : public std::exception { public: virtual const char *what() const throw() { return "ACL JSON is malformed"; } }; /** * Custom exception ACLReasonMalformed */ class ACLReasonMalformed : public std::exception { public: virtual const char *what() const throw() { return "ACL Reason JSON is malformed"; } }; #endif ================================================ FILE: C/common/include/aggregate.h ================================================ #ifndef _AGGREGRATE_H #define _AGGREGRATE_H /* * Fledge storage client. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> /** * Aggregate clause in a selection of records */ class Aggregate { public: Aggregate(const std::string& operation, const std::string& column) : m_column(column), m_operation(operation) {}; ~Aggregate() {}; std::string toJSON(); private: const std::string m_column; const std::string m_operation; }; #endif ================================================ FILE: C/common/include/asset_tracking.h ================================================ #ifndef _ASSET_TRACKING_H #define _ASSET_TRACKING_H /* * Fledge asset tracking related * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora, Massimiliano Pinto */ #include <logger.h> #include <vector> #include <set> #include <sstream> #include <unordered_set> #include <management_client.h> #include <queue> #include <thread> #include <mutex> #include <condition_variable> #include <storage_client.h> #define MIN_ASSET_TRACKER_UPDATE 500 // The minimum interval for asset tracker updates /** * Tracking abstract base class to be passed in the 
process data queue */ class TrackingTuple { public: TrackingTuple() {}; virtual ~TrackingTuple() = default; virtual InsertValues processData(bool storage_connected, ManagementClient *mgtClient, bool &warned, std::string &instanceName) = 0; virtual std::string assetToString() = 0; }; /** * The AssetTrackingTuple class is used to represent an asset * tracking tuple. Hash function and '==' operator are defined for * this class and pointer to this class that would be required * to create an unordered_set of this class. */ class AssetTrackingTuple : public TrackingTuple { public: std::string assetToString() { std::ostringstream o; o << "service:" << m_serviceName << ", plugin:" << m_pluginName << ", asset:" << m_assetName << ", event:" << m_eventName << ", deprecated:" << m_deprecated; return o.str(); } inline bool operator==(const AssetTrackingTuple& x) const { return ( x.m_serviceName==m_serviceName && x.m_pluginName==m_pluginName && x.m_assetName==m_assetName && x.m_eventName==m_eventName); }; AssetTrackingTuple(const std::string& service, const std::string& plugin, const std::string& asset, const std::string& event, const bool& deprecated = false) : m_serviceName(service), m_pluginName(plugin), m_assetName(asset), m_eventName(event), m_deprecated(deprecated) {} std::string &getAssetName() { return m_assetName; }; std::string getPluginName() { return m_pluginName;} std::string getEventName() { return m_eventName;} std::string getServiceName() { return m_serviceName;} bool isDeprecated() { return m_deprecated; }; void unDeprecate() { m_deprecated = false; }; InsertValues processData(bool storage_connected, ManagementClient *mgtClient, bool &warned, std::string &instanceName); public: std::string m_serviceName; std::string m_pluginName; std::string m_assetName; std::string m_eventName; private: bool m_deprecated; }; struct AssetTrackingTuplePtrEqual { bool operator()(AssetTrackingTuple const* a, AssetTrackingTuple const* b) const { return *a == *b; } }; namespace std { 
template <> struct hash<AssetTrackingTuple> { size_t operator()(const AssetTrackingTuple& t) const { return (std::hash<std::string>()(t.m_serviceName + t.m_pluginName + t.m_assetName + t.m_eventName)); } }; template <> struct hash<AssetTrackingTuple*> { size_t operator()(AssetTrackingTuple* t) const { return (std::hash<std::string>()(t->m_serviceName + t->m_pluginName + t->m_assetName + t->m_eventName)); } }; } class StorageAssetTrackingTuple : public TrackingTuple { public: StorageAssetTrackingTuple(const std::string& service, const std::string& plugin, const std::string& asset, const std::string& event, const bool& deprecated = false, const std::string& datapoints = "", unsigned int c = 0) : m_datapoints(datapoints), m_maxCount(c), m_serviceName(service), m_pluginName(plugin), m_assetName(asset), m_eventName(event), m_deprecated(deprecated) {}; inline bool operator==(const StorageAssetTrackingTuple& x) const { return ( x.m_serviceName==m_serviceName && x.m_pluginName==m_pluginName && x.m_assetName==m_assetName && x.m_eventName==m_eventName); }; std::string assetToString() { std::ostringstream o; o << "service:" << m_serviceName << ", plugin:" << m_pluginName << ", asset:" << m_assetName << ", event:" << m_eventName << ", deprecated:" << m_deprecated << ", m_datapoints:" << m_datapoints << ", m_maxCount:" << m_maxCount; return o.str(); }; bool isDeprecated() { return m_deprecated; }; unsigned int getMaxCount() { return m_maxCount; } std::string getDataPoints() { return m_datapoints; } void unDeprecate() { m_deprecated = false; }; void setDeprecate() { m_deprecated = true; }; InsertValues processData(bool storage, ManagementClient *mgtClient, bool &warned, std::string &instanceName); public: std::string m_datapoints; unsigned int m_maxCount; std::string m_serviceName; std::string m_pluginName; std::string m_assetName; std::string m_eventName; private: bool m_deprecated; }; struct StorageAssetTrackingTuplePtrEqual { bool operator()(StorageAssetTrackingTuple const* 
a, StorageAssetTrackingTuple const* b) const { return *a == *b; } }; namespace std { template <> struct hash<StorageAssetTrackingTuple> { size_t operator()(const StorageAssetTrackingTuple& t) const { return (std::hash<std::string>()(t.m_serviceName + t.m_pluginName + t.m_assetName + t.m_eventName)); } }; template <> struct hash<StorageAssetTrackingTuple*> { size_t operator()(StorageAssetTrackingTuple* t) const { return (std::hash<std::string>()(t->m_serviceName + t->m_pluginName + t->m_assetName + t->m_eventName)); } }; } typedef std::unordered_map<StorageAssetTrackingTuple*, std::set<std::string>, std::hash<StorageAssetTrackingTuple*>, StorageAssetTrackingTuplePtrEqual> StorageAssetCacheMap; typedef std::unordered_map<StorageAssetTrackingTuple*, std::set<std::string>, std::hash<StorageAssetTrackingTuple*>, StorageAssetTrackingTuplePtrEqual>::iterator StorageAssetCacheMapItr; class ManagementClient; /** * The AssetTracker class provides the asset tracking functionality. * There are methods to populate asset tracking cache from asset_tracker DB table, * and methods to check/add asset tracking tuples to DB and to cache */ class AssetTracker { public: AssetTracker(ManagementClient *mgtClient, std::string service); ~AssetTracker(); static AssetTracker *getAssetTracker(); void populateAssetTrackingCache(std::string plugin, std::string event); void populateStorageAssetTrackingCache(); bool checkAssetTrackingCache(AssetTrackingTuple& tuple); AssetTrackingTuple* findAssetTrackingCache(AssetTrackingTuple& tuple); void addAssetTrackingTuple(AssetTrackingTuple& tuple); void addAssetTrackingTuple(std::string plugin, std::string asset, std::string event); void addStorageAssetTrackingTuple(StorageAssetTrackingTuple& tuple, std::set<std::string>& dpSet, bool addObj = false); StorageAssetTrackingTuple* findStorageAssetTrackingCache(StorageAssetTrackingTuple& tuple); std::string getIngestService(const std::string& asset) { return getService("Ingest", asset); }; std::string 
getEgressService(const std::string& asset) { return getService("Egress", asset); }; void workerThread(); bool getDeprecated(StorageAssetTrackingTuple* ptr); void updateCache(std::set<std::string> dpSet, StorageAssetTrackingTuple* ptr); std::set<std::string> *getStorageAssetTrackingCacheData(StorageAssetTrackingTuple* tuple); bool tune(unsigned long updateInterval); private: std::string getService(const std::string& event, const std::string& asset); void queue(TrackingTuple *tuple); void processQueue(); std::set<std::string> getDataPointsSet(std::string strDatapoints); bool getFledgeConfigInfo(); private: static AssetTracker *instance; ManagementClient *m_mgtClient; std::string m_service; std::unordered_set<AssetTrackingTuple*, std::hash<AssetTrackingTuple*>, AssetTrackingTuplePtrEqual> assetTrackerTuplesCache; std::queue<TrackingTuple *> m_pending; // Tuples that are not yet written to the storage std::thread *m_thread; bool m_shutdown; std::condition_variable m_cv; std::mutex m_mutex; std::string m_fledgeName; StorageClient *m_storageClient; StorageAssetCacheMap storageAssetTrackerTuplesCache; unsigned int m_updateInterval; }; /** * A class to hold a set of asset tracking tuples that allows * lookup by name. */ class AssetTrackingTable { public: AssetTrackingTable(); ~AssetTrackingTable(); void add(AssetTrackingTuple *tuple); void remove(const std::string& name); AssetTrackingTuple *find(const std::string& name); private: std::map<std::string, AssetTrackingTuple *> m_tuples; }; #endif ================================================ FILE: C/common/include/audit_logger.h ================================================ #ifndef _AUDIT_LOGGER_H #define _AUDIT_LOGGER_H /* * Fledge Singleton Audit Logger interface * * Copyright (c) 2023 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <logger.h> #include <management_client.h> #include <string> /** * A singleton class for access to the audit logger within services. 
The * service must create this with the maagement client before any access to it is used. */ class AuditLogger { public: AuditLogger(ManagementClient *mgmt); ~AuditLogger(); static AuditLogger *getLogger(); static void auditLog(const std::string& code, const std::string& level, const std::string& data = ""); void audit(const std::string& code, const std::string& level, const std::string& data = ""); private: static AuditLogger *m_instance; ManagementClient *m_mgmt; }; #endif ================================================ FILE: C/common/include/base64.h ================================================ #ifndef _BASE64_H_ #define _BASE64_H_ #include <cstdint> /* * Fledge Base64 encoding and decoding tables * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ static const char encodingTable[] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/' }; static const uint8_t decodingTable[] = { 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 62, 64, 64, 64, 63, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 64, 64, 64, 0, 64, 64, 64, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 64, 64, 64, 64, 64, 64, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 
64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64 }; #endif ================================================ FILE: C/common/include/base64databuffer.h ================================================ #ifndef _BASE64_DATA_BUFFER_H_ #define _BASE64_DATA_BUFFER_H_ /* * Fledge Base64DataBuffer encoding * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <databuffer.h> #include <string> #include <stdexcept> #include <base64.h> /** * The Base64DataBuffer class provide functionality on top of the * simple DataBuffer class that is used to encode the buffer in * base64 such that it may be stored as string data. */ class Base64DataBuffer : public DataBuffer { public: Base64DataBuffer(const std::string& encoded); std::string encode(); }; #endif ================================================ FILE: C/common/include/base64dpimage.h ================================================ #ifndef _BASE64_DPIMAGE_H_ #define _BASE64_DPIMAGE_H_ /* * Fledge Base64 encoded data point image * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <dpimage.h> #include <string> #include <stdexcept> #include <base64.h> /** * The Base64DPImage provide functionality on top of the * simple DPImage class that is used to encode the buffer in * base64 such that it may be stored as string data. 
*/
class Base64DPImage : public DPImage {
	public:
		// Construct an image by decoding a base64 encoded string
		Base64DPImage(const std::string& encoded);
		// Return the image data as a base64 encoded string
		std::string	encode();
};
#endif

================================================ FILE: C/common/include/bearer_token.h ================================================
#ifndef _BEARER_TOKEN_H
#define _BEARER_TOKEN_H
/*
 * Fledge bearer token utilities
 *
 * Copyright (c) 2021 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <server_http.hpp>
#include <string>

// HTTP header and scheme prefix that carry the bearer token
#define AUTH_HEADER	"Authorization"
#define BEARER_SCHEMA	"Bearer "

/**
 * This class represents a JWT bearer token
 *
 * The claims are stored after verification to core service API endpoint
 */
class BearerToken {
	public:
		// Extract the token from the Authorization header of an HTTP request
		BearerToken(std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Request> request);
		// Wrap an already extracted token string
		BearerToken(std::string& token);
		~BearerToken() {};
		// True when a non-empty token was captured
		bool		exists() { return m_bearer_token.length() > 0; };
		// Return string reference
		const std::string&
				token() { return m_bearer_token; };
		// Parse the core service verification response and store the claims
		bool		verify(const std::string& serverResponse);
		// Expiration claim; only meaningful after a successful verify()
		unsigned long	getExpiration() { return m_expiration; };
		// Return string references; populated by verify()
		const std::string&
				getAudience() { return m_audience; };
		const std::string&
				getSubject() { return m_subject; };
		const std::string&
				getIssuer() { return m_issuer; };
	private:
		bool		m_verified;	// Set by verify()
		unsigned long	m_expiration;	// Token expiration
		std::string	m_bearer_token;	// The raw token string
		std::string	m_audience;	// Audience claim
		std::string	m_subject;	// Subject claim
		std::string	m_issuer;	// Issuer claim
};
#endif

================================================ FILE: C/common/include/config_category.h ================================================
#ifndef _CONFIG_CATEGORY_H
#define _CONFIG_CATEGORY_H
/*
 * Fledge category management
 *
 * Copyright (c) 2017-2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <string>
#include <vector>
#include <map>
#include <rapidjson/document.h>
#include <json_utils.h>

/**
 * Name, display name and description of a configuration category.
 */
class ConfigCategoryDescription {
	public:
		// Display name defaults to the category name
		ConfigCategoryDescription(const std::string& name,
					  const std::string& description) :
			m_name(name), m_displayName(name), m_description(description) {};
		ConfigCategoryDescription(const std::string& name,
					  const std::string& displayName,
					  const std::string& description) :
			m_name(name), m_displayName(displayName), m_description(description) {};
		std::string	getName() const { return m_name; };
		std::string	getDisplayName() const { return m_displayName; };
		std::string	getDescription() const { return m_description; };
		// JSON string with m_name and m_description
		std::string	toJSON() const;
	private:
		const std::string	m_name;
		const std::string	m_displayName;
		const std::string	m_description;
};

/**
 * An ordered collection of ConfigCategoryDescription pointers.
 * NOTE(review): a destructor is declared, presumably freeing the stored
 * descriptions — confirm ownership in config_category.cpp.
 */
class ConfigCategories {
	public:
		// Parse a JSON document describing a set of categories
		ConfigCategories(const std::string& json);
		ConfigCategories(); // Constructor without parameters
		~ConfigCategories();
		unsigned int	length() { return m_categories.size(); };
		ConfigCategoryDescription
				*operator[] (const unsigned int idx) { return m_categories[idx]; };
		// Add one category name with description
		void		addCategoryDescription(ConfigCategoryDescription* elem);
		// JSON string of all categories
		std::string	toJSON() const;
	private:
		std::vector<ConfigCategoryDescription *>	m_categories;
};

/**
 * A configuration category: a named set of configuration items, each with
 * a type, a default and a current value plus presentation attributes.
 */
class ConfigCategory {
	public:
		// The supported types of a configuration item
		enum ItemType {
			UnknownType,
			StringItem,
			EnumerationItem,
			JsonItem,
			BoolItem,
			NumberItem,
			DoubleItem,
			ScriptItem,
			CategoryType,
			CodeItem,
			BucketItem,
			ListItem,
			KVListItem
		};

		// Construct by parsing the category JSON
		ConfigCategory(const std::string& name, const std::string& json);
		ConfigCategory() {};
		ConfigCategory(const ConfigCategory& orig);
		ConfigCategory(const ConfigCategory *orig);
		~ConfigCategory();
		// Add an item giving type, default and value
		void		addItem(const std::string& name, const std::string description,
					const std::string& type, const std::string def,
					const std::string& value);
		// Add an enumeration item with its list of permitted options
		void		addItem(const std::string& name, const std::string description,
					const std::string def, const std::string& value,
					const std::vector<std::string> options);
		void		removeItems();
		// Remove / keep only the items of the given type
		void		removeItemsType(ItemType type);
		void		keepItemsType(ItemType type);
		bool		extractSubcategory(ConfigCategory &subCategories);
		void		setDescription(const std::string& description);
		std::string	getName() const { return m_name; };
		std::string	getDescription() const { return m_description; };
		std::string	getDisplayName() const { return m_displayName; };
		void		setDisplayName(const std::string& displayName) {m_displayName = displayName;};
		unsigned int	getCount() const { return m_items.size(); };
		bool		itemExists(const std::string& name) const;
		bool		setItemDisplayName(const std::string& name, const std::string& displayName);
		// Item value accessors; the overload with defaultValue returns it
		// when the item is absent
		std::string	getValue(const std::string& name) const;
		std::string	getValue(const std::string& name, const std::string& defaultValue) const;
		bool		getBoolValue(const std::string& name, bool defaultValue = false) const;
		int		getIntegerValue(const std::string& name, int defaultValue = 0) const;
		long		getLongValue(const std::string& name, long defaultValue = 0) const;
		double		getDoubleValue(const std::string& name, double defaultValue = 0) const;
		std::vector<std::string>
				getValueList(const std::string& name) const;
		std::map<std::string, std::string>
				getValueKVList(const std::string& name) const;
		std::string	getType(const std::string& name) const;
		std::string	getDescription(const std::string& name) const;
		std::string	getDefault(const std::string& name) const;
		bool		setDefault(const std::string& name, const std::string& value);
		bool		setValue(const std::string& name, const std::string& value);
		std::string	getDisplayName(const std::string& name) const;
		std::string	getmParentName() const {return (m_parent_name);};
		std::vector<std::string>
				getOptions(const std::string& name) const;
		std::string	getLength(const std::string& name) const;
		std::string	getMinimum(const std::string& name) const;
		std::string	getMaximum(const std::string& name) const;
		// Item type predicates
		bool		isString(const std::string& name) const;
		bool		isEnumeration(const std::string& name) const;
		bool		isJSON(const std::string& name) const;
		bool		isBool(const std::string& name) const;
		bool		isNumber(const std::string& name) const;
		bool		isDouble(const std::string& name) const;
		bool		isList(const std::string& name) const;
		bool		isKVList(const std::string& name) const;
		bool		isDeprecated(const std::string& name) const;
		std::string	toJSON(const bool full=false) const;
		std::string	itemsToJSON(const bool full=false) const;
		ConfigCategory&	operator=(ConfigCategory const& rhs);
		ConfigCategory&	operator+=(ConfigCategory const& rhs);
		void		setItemsValueFromDefault();
		void		checkDefaultValuesOnly() const;
		std::string	itemToJSON(const std::string& itemName) const;
		std::string	to_string(const rapidjson::Value& v) const;
		std::vector<std::string>
				getPermissions(const std::string& name) const;
		bool		hasPermission(const std::string& name, const std::string& username) const;

		// Presentation / validation attributes an item may carry
		enum ItemAttribute {
			ORDER_ATTR, READONLY_ATTR, MANDATORY_ATTR, FILE_ATTR,
			MINIMUM_ATTR, MAXIMUM_ATTR, LENGTH_ATTR, VALIDITY_ATTR,
			GROUP_ATTR, DISPLAY_NAME_ATTR, DEPRECATED_ATTR, RULE_ATTR,
			BUCKET_PROPERTIES_ATTR, LIST_SIZE_ATTR, ITEM_TYPE_ATTR,
			LIST_NAME_ATTR, KVLIST_KEY_NAME_ATTR, KVLIST_KEY_DESCRIPTION_ATTR,
			JSON_SCHEMA_ATTR
		};
		std::string	getItemAttribute(const std::string& itemName,
					ItemAttribute itemAttribute) const;
		bool		setItemAttribute(const std::string& itemName,
					ItemAttribute itemAttribute, const std::string& value);
		std::vector<std::pair<std::string,std::string>>*
				parseBucketItemValue(const std::string &);

	protected:
		/**
		 * A single configuration item and all of its attributes.
		 */
		class CategoryItem {
			public:
				CategoryItem(const std::string& name, const rapidjson::Value& item);
				CategoryItem(const std::string& name, const std::string& description,
					     const std::string& type, const std::string def,
					     const std::string& value);
				CategoryItem(const std::string& name, const std::string& description,
					     const std::string def, const std::string& value,
					     const std::vector<std::string> options);
				CategoryItem(const CategoryItem& rhs);
				// Return both "value" and "default" items
				std::string	toJSON(const bool full=false) const;
				// Return only "default" items
				std::string	defaultToJSON() const;
			public:
				std::string	m_name;
				std::string	m_displayName;
				std::string	m_type;
				std::string	m_default;
				std::string	m_value;
				std::string	m_description;
				std::string	m_order;
				std::string	m_readonly;
				std::string	m_mandatory;
				std::string	m_deprecated;
				std::string	m_length;
				std::string	m_minimum;
				std::string	m_maximum;
				std::string	m_filename;
				std::vector<std::string>
						m_options;
				std::string	m_file;
				ItemType	m_itemType;
				std::string	m_validity;
				std::string	m_group;
				std::string	m_rule;
				std::string	m_bucketProperties;
				std::string	m_listSize;
				std::string	m_listItemType;
				std::string	m_listName;
				std::string	m_kvlistKeyName;
				std::string	m_kvlistKeyDescription;
				std::vector<std::string>
						m_permissions;
				std::string	m_jsonSchema;
		};
		std::vector<CategoryItem *>	m_items;
		std::string			m_name;
		std::string			m_parent_name;
		std::string			m_description;
		std::string			m_displayName;

	public:
		// Iterate over the items of the category
		using iterator = std::vector<CategoryItem *>::iterator;
		using const_iterator = std::vector<CategoryItem *>::const_iterator;
		const_iterator begin() const { return m_items.begin(); }
		const_iterator end() const { return m_items.end(); }
		const_iterator cbegin() const { return m_items.cbegin(); }
		const_iterator cend() const { return m_items.cend(); }
};

/**
 * DefaultConfigCategory
 *
 * json input parameter must contain only "default" items.
* itemsToJSON() reports only "defaults"
 *
 * This class must be used when creating/updating a category
 * via ManagementClient::addCategoryDefault(DefaultConfigCategory categoryDefault)
 */
class DefaultConfigCategory : public ConfigCategory {
	public:
		DefaultConfigCategory(const std::string& name, const std::string& json);
		// Build the defaults-only view from an existing full category
		DefaultConfigCategory(const ConfigCategory& orig) : ConfigCategory(orig)
		{
		};
		~DefaultConfigCategory();
		std::string	toJSON() const;
		std::string	itemsToJSON() const;
};

/**
 * A category constructed from a configuration change JSON document.
 */
class ConfigCategoryChange : public ConfigCategory {
	public:
		ConfigCategoryChange(const std::string& json);
};

// Thrown when a named item is not present in the category
class ConfigItemNotFound : public std::exception {
	public:
		virtual const char *what() const throw()
		{
			return "Configuration item not found in configuration category";
		}
};

// Thrown when the category JSON cannot be parsed
class ConfigMalformed : public std::exception {
	public:
		virtual const char *what() const throw()
		{
			return "Configuration category JSON is malformed";
		}
};

/**
 * This exception must be raised when at least one of the JSON items of a
 * new being created category have both "value" and "default" fields.
 */
class ConfigValueFoundWithDefault : public std::exception {
	public:
		// Constructor with parameter: the offending item name
		ConfigValueFoundWithDefault(const std::string& item)
		{
			m_errmsg = "Configuration item '";
			m_errmsg.append(item);
			m_errmsg += "' has both 'value' and 'default' fields.";
		};
		virtual const char *what() const throw()
		{
			return m_errmsg.c_str();
		}
	private:
		std::string	m_errmsg;	// Message built in the constructor
};

/**
 * This exception must be raised when a requested item attribute
 * does not exist.
 * Supported item attributes: "order", "readonly", "file".
 */
class ConfigItemAttributeNotFound : public std::exception {
	public:
		virtual const char *what() const throw()
		{
			return "Configuration item attribute not found in configuration category";
		}
};

/**
 * An attempt has been made to access a configuration item as a list when the
 * item is not of type list
 */
class ConfigItemNotAList : public std::exception {
	public:
		virtual const char *what() const throw()
		{
			return "Configuration item is not a list type item";
		}
};
#endif

================================================ FILE: C/common/include/cryptography_utils.h ================================================
#ifndef _CRYPTOGRAPHY_UTILS_H
#define _CRYPTOGRAPHY_UTILS_H
/*
 * Fledge utilities functions for generating cryptographic hash
 *
 * Copyright (c) 2025 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Devki Nandan Ghildiyal
 */
#include <openssl/sha.h>
#include <openssl/md5.h>
#include <openssl/opensslv.h>
// OpenSSL 3.x deprecates the low level digest APIs; use the EVP interface
#ifdef OPENSSL_VERSION_NUMBER
#if OPENSSL_VERSION_NUMBER >= 0x30000000L
#include <openssl/evp.h>
#endif
#endif
#include <string>

// Return the digest of input; encoding of the result (hex/binary) is
// defined in cryptography_utils.cpp — confirm there before relying on it
std::string compute_sha256(const std::string& input);
std::string compute_md5(const std::string& input);
#endif

================================================ FILE: C/common/include/databuffer.h ================================================
#ifndef _DATABUFFER_H
#define _DATABUFFER_H
/*
 * Fledge Databuffer type for datapoints
 *
 * Copyright (c) 2021 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <unistd.h>

/**
 * Buffer type for storage of arbitrary buffers of data within a datapoint.
 * A DataBuffer is essentially a 1 dimensional array of a memory primitive of
 * itemSize.
*/
class DataBuffer {
	public:
		// Allocate a buffer of len items, each of itemSize bytes
		DataBuffer(size_t itemSize, size_t len);
		DataBuffer(const DataBuffer& rhs);
		DataBuffer& operator=(const DataBuffer& rhs);
		~DataBuffer();
		// Copy data from src into the buffer; whether len is a byte or
		// item count is not visible here — confirm in databuffer.cpp
		void populate(void *src, int len);
		/**
		 * Return the size of each item in the buffer
		 */
		size_t getItemSize() { return m_itemSize; };
		/**
		 * Return the number of items in the buffer
		 */
		size_t getItemCount() { return m_len; };
		/**
		 * Return a pointer to the raw data in the data buffer
		 */
		void *getData() { return m_data; };
	protected:
		DataBuffer() {};
		size_t	m_itemSize;	// Size in bytes of a single item
		size_t	m_len;		// Number of items in the buffer
		void	*m_data;	// The underlying storage
};
#endif

================================================ FILE: C/common/include/datapoint.h ================================================
#ifndef _DATAPOINT_H
#define _DATAPOINT_H
/*
 * Fledge
 *
 * Copyright (c) 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <string>
#include <sstream>
#include <iomanip>
#include <cfloat>
#include <vector>
#include <logger.h>
#include <dpimage.h>
#include <databuffer.h>
#include <rapidjson/document.h>
#include "string_utils.h"

class Datapoint;

/**
 * Class to hold an actual reading value.
 * The class is simply a tagged union that also contains
 * methods to return the value as a string for encoding
 * in a JSON document.
*/ class DatapointValue { public: /** * Construct with a string */ DatapointValue(const std::string& value) { m_value.str = new std::string(value); m_type = T_STRING; }; /** * Construct with an integer value */ DatapointValue(const long value) { m_value.i = value; m_type = T_INTEGER; }; /** * Construct with a floating point value */ DatapointValue(const double value) { m_value.f = value; m_type = T_FLOAT; }; /** * Construct with an array of floating point values */ DatapointValue(const std::vector<double>& values) { m_value.a = new std::vector<double>(values); m_type = T_FLOAT_ARRAY; }; /** * Construct with an array of Datapoints */ DatapointValue(std::vector<Datapoint*>*& values, bool isDict) { m_value.dpa = values; m_type = isDict? T_DP_DICT : T_DP_LIST; } /** * Construct with an Image */ DatapointValue(const DPImage& value) { m_value.image = new DPImage(value); m_type = T_IMAGE; } /** * Construct with a DataBuffer */ DatapointValue(const DataBuffer& value) { m_value.dataBuffer = new DataBuffer(value); m_type = T_DATABUFFER; } /** * Construct with an Image Pointer, the * image becomes owned by the datapointValue */ DatapointValue(DPImage *value) { m_value.image = value; m_type = T_IMAGE; } /** * Construct with a DataBuffer */ DatapointValue(DataBuffer *value) { m_value.dataBuffer = value; m_type = T_DATABUFFER; } /** * Construct with a 2 dimentional array of floating point values */ DatapointValue(const std::vector< std::vector<double> *>& values) { m_value.a2d = new std::vector< std::vector<double>* >; for (auto row : values) { std::vector<double> *nrow = new std::vector<double>; for (auto& d : *row) { nrow->push_back(d); } m_value.a2d->push_back(nrow); } m_type = T_2D_FLOAT_ARRAY; }; /** * Copy constructor */ DatapointValue(const DatapointValue& obj); /** * Assignment Operator */ DatapointValue& operator=(const DatapointValue& rhs); /** * Destructor */ ~DatapointValue(); /** * Set the value of a datapoint, this may * also cause the type to be changed. 
* @param value An string value to set */ void setValue(std::string value) { if(m_value.str) { delete m_value.str; } m_value.str = new std::string(value); m_type = T_STRING; } /** * Set the value of a datapoint, this may * also cause the type to be changed. * @param value An integer value to set */ void setValue(long value) { m_value.i = value; m_type = T_INTEGER; } /** * Set the value of a datapoint, this may * also cause the type to be changed. * @param value A floating point value to set */ void setValue(double value) { m_value.f = value; m_type = T_FLOAT; } /** Set the value of a datapoint to be an image * @param value The image to set in the data point */ void setValue(const DPImage& value) { m_value.image = new DPImage(value); m_type = T_IMAGE; } /** * Return the value as a string */ std::string toString() const; /** * Return string value without trailing/leading quotes */ std::string toStringValue() const { return *m_value.str; }; /** * Return long value */ long toInt() const { return m_value.i; }; /** * Return double value */ double toDouble() const { return m_value.f; }; // Supported Data Tag Types typedef enum DatapointTag { T_STRING, T_INTEGER, T_FLOAT, T_FLOAT_ARRAY, T_DP_DICT, T_DP_LIST, T_IMAGE, T_DATABUFFER, T_2D_FLOAT_ARRAY } dataTagType; /** * Return the Tag type */ dataTagType getType() const { return m_type; } std::string getTypeStr() const { switch(m_type) { case T_STRING: return std::string("STRING"); case T_INTEGER: return std::string("INTEGER"); case T_FLOAT: return std::string("FLOAT"); case T_FLOAT_ARRAY: return std::string("FLOAT_ARRAY"); case T_DP_DICT: return std::string("DP_DICT"); case T_DP_LIST: return std::string("DP_LIST"); case T_IMAGE: return std::string("IMAGE"); case T_DATABUFFER: return std::string("DATABUFFER"); case T_2D_FLOAT_ARRAY: return std::string("2D_FLOAT_ARRAY"); default: return std::string("INVALID"); } } /** * Return array of datapoints */ std::vector<Datapoint*>*& getDpVec() { return m_value.dpa; } /** * Return 
array of float */ std::vector<double>*& getDpArr() { return m_value.a; } /** * Return 2D array of float */ std::vector<std::vector<double>* >*& getDp2DArr() { return m_value.a2d; } /** * Return the Image */ DPImage *getImage() { return m_value.image; } /** * Return the DataBuffer */ DataBuffer *getDataBuffer() { return m_value.dataBuffer; } private: void deleteNestedDPV(); const std::string escape(const std::string& str) const; union data_t { std::string* str; long i; double f; std::vector<double>* a; std::vector<Datapoint*> *dpa; DPImage *image; DataBuffer *dataBuffer; std::vector< std::vector<double>* > *a2d; } m_value; DatapointTag m_type; }; /** * Name and value pair used to represent a data value * within an asset reading. */ class Datapoint { public: /** * Construct with a data point value */ Datapoint(const std::string& name, DatapointValue& value) : m_name(name), m_value(value) { } ~Datapoint() { } /** * Return asset reading data point as a JSON * property that can be included within a JSON * document. 
*/
		std::string toJSONProperty()
		{
			std::string rval = "\"" + escape(m_name) + "\":";
			rval += m_value.toString();
			return rval;
		}
		/**
		 * Return the Datapoint name
		 */
		const std::string getName() const
		{
			return m_name;
		}
		/**
		 * Rename the datapoint
		 */
		void setName(std::string name)
		{
			m_name = name;
		}
		/**
		 * Return Datapoint value
		 */
		const DatapointValue getData() const
		{
			return m_value;
		}
		/**
		 * Return reference to Datapoint value
		 */
		DatapointValue& getData()
		{
			return m_value;
		}
		/**
		 * Parse a json string and generates
		 * a corresponding datapoint vector
		 */
		std::vector<Datapoint*>* parseJson(const std::string& json);
		std::vector<Datapoint*>* recursiveJson(const rapidjson::Value& document);
	private:
		std::string		m_name;
		DatapointValue		m_value;
};
#endif

================================================ FILE: C/common/include/datapoint_utility.h ================================================
#ifndef INCLUDE_DATAPOINT_UTILITY_H_
#define INCLUDE_DATAPOINT_UTILITY_H_
/*
 * Fledge
 *
 * Copyright (c) 2021 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Yannick Marchetaux
 *
 */
#include <vector>
#include <string>
#include "datapoint.h"
#include "reading.h"

namespace DatapointUtility {
	// Define type
	using Datapoints = std::vector<Datapoint*>;
	using Readings = std::vector<Reading*>;

	// Function for search value
	Datapoints	*findDictElement(Datapoints* dict, const std::string& key);
	DatapointValue	*findValueElement(Datapoints* dict, const std::string& key);
	Datapoint	*findDatapointElement(Datapoints* dict, const std::string& key);
	Datapoints	*findDictOrListElement(Datapoints *dict, const std::string& key, DatapointValue::dataTagType type);
	Datapoints	*findListElement(Datapoints *dict, const std::string& key);
	std::string	findStringElement(Datapoints* dict, const std::string& key);

	// delete
	void		deleteValue(Datapoints *dps, const std::string& key);

	// Function for create element
	Datapoint	*createStringElement(Datapoints *dps, const std::string& key, const std::string& valueDefault);
	Datapoint	*createIntegerElement(Datapoints *dps, const std::string& key, long valueDefault);
	Datapoint	*createDictElement(Datapoints *dps, const std::string& key);
	Datapoint	*createListElement(Datapoints *dps, const std::string& key);
	Datapoint	*createDictOrListElement(Datapoints* dps, const std::string& key, bool dict);
};
#endif // INCLUDE_DATAPOINT_UTILITY_H_

================================================ FILE: C/common/include/dpimage.h ================================================
#ifndef _DPIMAGE_H
#define _DPIMAGE_H
/*
 * Fledge datapoint image
 *
 * Copyright (c) 2021 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
/**
 * Simple Image class that will be used within data points to store image data.
 *
 * This class merely acts to encapsulate the data of a simple image in memory,
 * complex functionality will be supported elsewhere. Images within the class
 * are stored as a simple, single area of memory the size of which is defined
 * by the width, height and depth of the image.
 */
class DPImage {
	public:
		DPImage() : m_width(0), m_height(0), m_depth(0), m_pixels(0), m_byteSize(0) {};
		DPImage(int width, int height, int depth, void *data);
		DPImage(const DPImage& rhs);
		DPImage& operator=(const DPImage& rhs);
		~DPImage();
		/**
		 * Return the height of the image
		 */
		int getHeight() { return m_height; };
		/**
		 * Return the width of the image
		 */
		int getWidth() { return m_width; };
		/**
		 * Return the depth of the image in bits
		 */
		int getDepth() { return m_depth; };
		/**
		 * Return a pointer to the raw data of the image
		 */
		void *getData() { return m_pixels; };
	protected:
		int	m_width;	// Width in pixels
		int	m_height;	// Height in pixels
		int	m_depth;	// Bits per pixel
		void	*m_pixels;	// The raw pixel storage
		int	m_byteSize;	// Presumably the byte size of m_pixels — confirm in dpimage.cpp
};
#endif

================================================ FILE: C/common/include/expression.h ================================================
#ifndef _EXPRESSION_H
#define _EXPRESSION_H
/*
 * Fledge storage client.
*
 * Copyright (c) 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <string>
#include <sstream>
#include <iostream>
#include <vector>
#include <resultset.h>

/**
 * Class that defines data to be inserted or updated in a column within the table
 */
class Expression {
	public:
		// Integer expression; the value is widened into the long union member
		Expression(const std::string& column, const std::string& op, int value) :
			m_column(column), m_op(op), m_type(INT_COLUMN)
		{
			m_value.ival = value;
		};
		// Floating point expression
		Expression(const std::string& column, const std::string& op, double value) :
			m_column(column), m_op(op), m_type(NUMBER_COLUMN)
		{
			m_value.fval = value;
		};
		/**
		 * Serialise as a JSON object with "column", "operator" and "value"
		 * properties.
		 */
		const std::string	toJSON() const
		{
			std::ostringstream json;

			json << "{ \"column\" : \"" << m_column << "\", ";
			json << "\"operator\" : \"" << m_op << "\", ";
			json << "\"value\" : ";
			switch (m_type)
			{
			// NOTE(review): these cases emit no value at all, which would
			// yield malformed JSON; they are unreachable with the current
			// constructors (only INT_COLUMN and NUMBER_COLUMN are set).
			case JSON_COLUMN:
			case BOOL_COLUMN:
			case STRING_COLUMN:
			case NULL_COLUMN:
				break;
			case INT_COLUMN:
				json << m_value.ival;
				break;
			case NUMBER_COLUMN:
				json << m_value.fval;
				break;
			}
			json << "}";
			return json.str();
		}
	private:
		const std::string	m_column;	// Target column name
		const std::string	m_op;		// Operator to apply
		ColumnType		m_type;		// ColumnType comes from resultset.h
		union {
			long	ival;
			double	fval;
		} m_value;	// Active member selected by m_type
};

/**
 * A vector of expressions that can be serialised as a JSON array.
 */
class ExpressionValues : public std::vector<Expression> {
	public:
		const std::string	toJSON() const
		{
			std::ostringstream json;

			json << "[ ";
			for (std::vector<Expression>::const_iterator it = this->cbegin();
			     it != this->cend(); ++it)
			{
				json << it->toJSON();
				if (it + 1 != this->cend())
					json << ", ";
				else
					json << " ";
			}
			json << "]";
			return json.str();
		};
};
#endif

================================================ FILE: C/common/include/exprtk.hpp ================================================
/*
 ******************************************************************
 * C++ Mathematical Expression Toolkit Library                    *
 *                                                                *
 * Author: Arash Partow (1999-2018)                               *
 * URL: http://www.partow.net/programming/exprtk/index.html       *
 *                                                                *
 * Copyright notice:                                              *
 * Free use of the C++ Mathematical Expression Toolkit Library is *
 * permitted under the guidelines
and in accordance with the most * * current version of the MIT License. * * http://www.opensource.org/licenses/MIT * * * * Example expressions: * * (00) (y + x / y) * (x - y / x) * * (01) (x^2 / sin(2 * pi / y)) - x / 2 * * (02) sqrt(1 - (x^2)) * * (03) 1 - sin(2 * x) + cos(pi / y) * * (04) a * exp(2 * t) + c * * (05) if(((x + 2) == 3) and ((y + 5) <= 9),1 + w, 2 / z) * * (06) (avg(x,y) <= x + y ? x - y : x * y) + 2 * pi / x * * (07) z := x + sin(2 * pi / y) * * (08) u := 2 * (pi * z) / (w := x + cos(y / pi)) * * (09) clamp(-1,sin(2 * pi * x) + cos(y / 2 * pi),+1) * * (10) inrange(-2,m,+2) == if(({-2 <= m} and [m <= +2]),1,0) * * (11) (2sin(x)cos(2y)7 + 1) == (2 * sin(x) * cos(2*y) * 7 + 1) * * (12) (x ilike 's*ri?g') and [y < (3 z^7 + w)] * * * ****************************************************************** */ /* * Mark Riddoch 3rd September 2019 * * A modified version of the mathematical expression toolkit is * included here for use by various Fledge plugins. The modification * allows for the use of escape characters in the definition and use * of symbols. 
*/ #ifndef INCLUDE_EXPRTK_HPP #define INCLUDE_EXPRTK_HPP #define allow_escaped_symbols 1 #include <algorithm> #include <cctype> #include <cmath> #include <complex> #include <cstdio> #include <cstdlib> #include <cstring> #include <deque> #include <exception> #include <functional> #include <iterator> #include <limits> #include <list> #include <map> #include <set> #include <stack> #include <stdexcept> #include <string> #include <utility> #include <vector> namespace exprtk { #ifdef exprtk_enable_debugging #define exprtk_debug(params) printf params #else #define exprtk_debug(params) (void)0 #endif #define exprtk_error_location \ "exprtk.hpp:" + details::to_str(__LINE__) \ #if defined(__GNUC__) && (__GNUC__ >= 7) #define exprtk_disable_fallthrough_begin \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wimplicit-fallthrough\"") \ #define exprtk_disable_fallthrough_end \ _Pragma ("GCC diagnostic pop") \ #else #define exprtk_disable_fallthrough_begin (void)0; #define exprtk_disable_fallthrough_end (void)0; #endif namespace details { typedef unsigned char uchar_t; typedef char char_t; typedef uchar_t* uchar_ptr; typedef char_t* char_ptr; typedef uchar_t const* uchar_cptr; typedef char_t const* char_cptr; inline bool is_whitespace(const char_t c) { return (' ' == c) || ('\n' == c) || ('\r' == c) || ('\t' == c) || ('\b' == c) || ('\v' == c) || ('\f' == c) ; } inline bool is_operator_char(const char_t c) { return ('+' == c) || ('-' == c) || ('*' == c) || ('/' == c) || ('^' == c) || ('<' == c) || ('>' == c) || ('=' == c) || (',' == c) || ('!' == c) || ('(' == c) || (')' == c) || ('[' == c) || (']' == c) || ('{' == c) || ('}' == c) || ('%' == c) || (':' == c) || ('?' 
== c) || ('&' == c) || ('|' == c) || (';' == c) ; } inline bool is_letter(const char_t c) { return (('a' <= c) && (c <= 'z')) || (('A' <= c) && (c <= 'Z')) ; } inline bool is_digit(const char_t c) { return ('0' <= c) && (c <= '9'); } inline bool is_letter_or_digit(const char_t c) { return is_letter(c) || is_digit(c); } inline bool is_left_bracket(const char_t c) { return ('(' == c) || ('[' == c) || ('{' == c); } inline bool is_right_bracket(const char_t c) { return (')' == c) || (']' == c) || ('}' == c); } inline bool is_bracket(const char_t c) { return is_left_bracket(c) || is_right_bracket(c); } inline bool is_sign(const char_t c) { return ('+' == c) || ('-' == c); } inline bool is_invalid(const char_t c) { return !is_whitespace (c) && !is_operator_char(c) && !is_letter (c) && !is_digit (c) && ('.' != c) && ('_' != c) && ('$' != c) && ('~' != c) && ('\'' != c); } #ifndef exprtk_disable_caseinsensitivity inline void case_normalise(std::string& s) { for (std::size_t i = 0; i < s.size(); ++i) { s[i] = static_cast<std::string::value_type>(std::tolower(s[i])); } } inline bool imatch(const char_t c1, const char_t c2) { return std::tolower(c1) == std::tolower(c2); } inline bool imatch(const std::string& s1, const std::string& s2) { if (s1.size() == s2.size()) { for (std::size_t i = 0; i < s1.size(); ++i) { if (std::tolower(s1[i]) != std::tolower(s2[i])) { return false; } } return true; } return false; } struct ilesscompare { inline bool operator() (const std::string& s1, const std::string& s2) const { const std::size_t length = std::min(s1.size(),s2.size()); for (std::size_t i = 0; i < length; ++i) { const char_t c1 = static_cast<char>(std::tolower(s1[i])); const char_t c2 = static_cast<char>(std::tolower(s2[i])); if (c1 > c2) return false; else if (c1 < c2) return true; } return s1.size() < s2.size(); } }; #else inline void case_normalise(std::string&) {} inline bool imatch(const char_t c1, const char_t c2) { return c1 == c2; } inline bool imatch(const std::string& s1, 
const std::string& s2) { return s1 == s2; } struct ilesscompare { inline bool operator() (const std::string& s1, const std::string& s2) const { return s1 < s2; } }; #endif inline bool is_valid_sf_symbol(const std::string& symbol) { // Special function: $f12 or $F34 return (4 == symbol.size()) && ('$' == symbol[0]) && imatch('f',symbol[1]) && is_digit(symbol[2]) && is_digit(symbol[3]); } inline const char_t& front(const std::string& s) { return s[0]; } inline const char_t& back(const std::string& s) { return s[s.size() - 1]; } inline std::string to_str(int i) { if (0 == i) return std::string("0"); std::string result; if (i < 0) { for ( ; i; i /= 10) { result += '0' + char(-(i % 10)); } result += '-'; } else { for ( ; i; i /= 10) { result += '0' + char(i % 10); } } std::reverse(result.begin(), result.end()); return result; } inline std::string to_str(std::size_t i) { return to_str(static_cast<int>(i)); } inline bool is_hex_digit(const std::string::value_type digit) { return (('0' <= digit) && (digit <= '9')) || (('A' <= digit) && (digit <= 'F')) || (('a' <= digit) && (digit <= 'f')) ; } inline uchar_t hex_to_bin(uchar_t h) { if (('0' <= h) && (h <= '9')) return (h - '0'); else return static_cast<unsigned char>(std::toupper(h) - 'A'); } template <typename Iterator> inline void parse_hex(Iterator& itr, Iterator end, std::string::value_type& result) { if ( (end != (itr )) && (end != (itr + 1)) && (end != (itr + 2)) && (end != (itr + 3)) && ('0' == *(itr )) && ( ('x' == *(itr + 1)) || ('X' == *(itr + 1)) ) && (is_hex_digit(*(itr + 2))) && (is_hex_digit(*(itr + 3))) ) { result = hex_to_bin(static_cast<uchar_t>(*(itr + 2))) << 4 | hex_to_bin(static_cast<uchar_t>(*(itr + 3))) ; itr += 3; } else result = '\0'; } inline void cleanup_escapes(std::string& s) { typedef std::string::iterator str_itr_t; str_itr_t itr1 = s.begin(); str_itr_t itr2 = s.begin(); str_itr_t end = s.end (); std::size_t removal_count = 0; while (end != itr1) { if ('\\' == (*itr1)) { ++removal_count; if 
(end == ++itr1) break; else if ('\\' != (*itr1)) { switch (*itr1) { case 'n' : (*itr1) = '\n'; break; case 'r' : (*itr1) = '\r'; break; case 't' : (*itr1) = '\t'; break; case '0' : parse_hex(itr1, end, (*itr1)); removal_count += 3; break; } continue; } } if (itr1 != itr2) { (*itr2) = (*itr1); } ++itr1; ++itr2; } s.resize(s.size() - removal_count); } class build_string { public: build_string(const std::size_t& initial_size = 64) { data_.reserve(initial_size); } inline build_string& operator << (const std::string& s) { data_ += s; return (*this); } inline build_string& operator << (char_cptr s) { data_ += std::string(s); return (*this); } inline operator std::string () const { return data_; } inline std::string as_string() const { return data_; } private: std::string data_; }; static const std::string reserved_words[] = { "break", "case", "continue", "default", "false", "for", "if", "else", "ilike", "in", "like", "and", "nand", "nor", "not", "null", "or", "repeat", "return", "shl", "shr", "swap", "switch", "true", "until", "var", "while", "xnor", "xor", "&", "|" }; static const std::size_t reserved_words_size = sizeof(reserved_words) / sizeof(std::string); static const std::string reserved_symbols[] = { "abs", "acos", "acosh", "and", "asin", "asinh", "atan", "atanh", "atan2", "avg", "break", "case", "ceil", "clamp", "continue", "cos", "cosh", "cot", "csc", "default", "deg2grad", "deg2rad", "equal", "erf", "erfc", "exp", "expm1", "false", "floor", "for", "frac", "grad2deg", "hypot", "iclamp", "if", "else", "ilike", "in", "inrange", "like", "log", "log10", "log2", "logn", "log1p", "mand", "max", "min", "mod", "mor", "mul", "ncdf", "nand", "nor", "not", "not_equal", "null", "or", "pow", "rad2deg", "repeat", "return", "root", "round", "roundn", "sec", "sgn", "shl", "shr", "sin", "sinc", "sinh", "sqrt", "sum", "swap", "switch", "tan", "tanh", "true", "trunc", "until", "var", "while", "xnor", "xor", "&", "|" }; static const std::size_t reserved_symbols_size = 
sizeof(reserved_symbols) / sizeof(std::string); static const std::string base_function_list[] = { "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2", "avg", "ceil", "clamp", "cos", "cosh", "cot", "csc", "equal", "erf", "erfc", "exp", "expm1", "floor", "frac", "hypot", "iclamp", "like", "log", "log10", "log2", "logn", "log1p", "mand", "max", "min", "mod", "mor", "mul", "ncdf", "pow", "root", "round", "roundn", "sec", "sgn", "sin", "sinc", "sinh", "sqrt", "sum", "swap", "tan", "tanh", "trunc", "not_equal", "inrange", "deg2grad", "deg2rad", "rad2deg", "grad2deg" }; static const std::size_t base_function_list_size = sizeof(base_function_list) / sizeof(std::string); static const std::string logic_ops_list[] = { "and", "nand", "nor", "not", "or", "xnor", "xor", "&", "|" }; static const std::size_t logic_ops_list_size = sizeof(logic_ops_list) / sizeof(std::string); static const std::string cntrl_struct_list[] = { "if", "switch", "for", "while", "repeat", "return" }; static const std::size_t cntrl_struct_list_size = sizeof(cntrl_struct_list) / sizeof(std::string); static const std::string arithmetic_ops_list[] = { "+", "-", "*", "/", "%", "^" }; static const std::size_t arithmetic_ops_list_size = sizeof(arithmetic_ops_list) / sizeof(std::string); static const std::string assignment_ops_list[] = { ":=", "+=", "-=", "*=", "/=", "%=" }; static const std::size_t assignment_ops_list_size = sizeof(assignment_ops_list) / sizeof(std::string); static const std::string inequality_ops_list[] = { "<", "<=", "==", "=", "!=", "<>", ">=", ">" }; static const std::size_t inequality_ops_list_size = sizeof(inequality_ops_list) / sizeof(std::string); inline bool is_reserved_word(const std::string& symbol) { for (std::size_t i = 0; i < reserved_words_size; ++i) { if (imatch(symbol, reserved_words[i])) { return true; } } return false; } inline bool is_reserved_symbol(const std::string& symbol) { for (std::size_t i = 0; i < reserved_symbols_size; ++i) { if (imatch(symbol, 
reserved_symbols[i])) { return true; } } return false; } inline bool is_base_function(const std::string& function_name) { for (std::size_t i = 0; i < base_function_list_size; ++i) { if (imatch(function_name, base_function_list[i])) { return true; } } return false; } inline bool is_control_struct(const std::string& cntrl_strct) { for (std::size_t i = 0; i < cntrl_struct_list_size; ++i) { if (imatch(cntrl_strct, cntrl_struct_list[i])) { return true; } } return false; } inline bool is_logic_opr(const std::string& lgc_opr) { for (std::size_t i = 0; i < logic_ops_list_size; ++i) { if (imatch(lgc_opr, logic_ops_list[i])) { return true; } } return false; } struct cs_match { static inline bool cmp(const char_t c0, const char_t c1) { return (c0 == c1); } }; struct cis_match { static inline bool cmp(const char_t c0, const char_t c1) { return (std::tolower(c0) == std::tolower(c1)); } }; template <typename Iterator, typename Compare> inline bool match_impl(const Iterator pattern_begin, const Iterator pattern_end, const Iterator data_begin, const Iterator data_end, const typename std::iterator_traits<Iterator>::value_type& zero_or_more, const typename std::iterator_traits<Iterator>::value_type& zero_or_one) { Iterator d_itr = data_begin; Iterator p_itr = pattern_begin; while ((p_itr != pattern_end) && (d_itr != data_end)) { if (zero_or_more == *p_itr) { while ((p_itr != pattern_end) && (*p_itr == zero_or_more || *p_itr == zero_or_one)) { ++p_itr; } if (p_itr == pattern_end) return true; const typename std::iterator_traits<Iterator>::value_type c = *(p_itr++); while ((d_itr != data_end) && !Compare::cmp(c,*d_itr)) { ++d_itr; } ++d_itr; } else if ((*p_itr == zero_or_one) || Compare::cmp(*p_itr, *d_itr)) { ++d_itr; ++p_itr; } else return false; } if (d_itr != data_end) return false; else if (p_itr == pattern_end) return true; else if ((zero_or_more == *p_itr) || (zero_or_one == *p_itr)) ++p_itr; return pattern_end == p_itr; } inline bool wc_match(const std::string& wild_card, 
const std::string& str)
{
   return match_impl<char_cptr,cs_match>(wild_card.data(),
                                         wild_card.data() + wild_card.size(),
                                         str.data(),
                                         str.data() + str.size(),
                                         '*', '?');
}

// Case-insensitive wildcard match of 'str' against 'wild_card'
// ('*' = zero or more characters, '?' = exactly one character).
inline bool wc_imatch(const std::string& wild_card,
                      const std::string& str)
{
   return match_impl<char_cptr,cis_match>(wild_card.data(),
                                          wild_card.data() + wild_card.size(),
                                          str.data(),
                                          str.data() + str.size(),
                                          '*', '?');
}

// Sequence/pattern match used for type-signature checking. On mismatch,
// 'diff_index' receives the position in 'str' where matching failed and
// 'diff_value' receives the (upper-cased) pattern character expected there.
// NOTE(review): "Z" appears to denote "empty sequence" in the pattern
// alphabet, and '*' repeats the pattern character immediately before it —
// inferred from this code only; confirm against the callers.
inline bool sequence_match(const std::string& pattern,
                           const std::string& str,
                           std::size_t& diff_index,
                           char_t& diff_value)
{
   if (str.empty())
   {
      return ("Z" == pattern);
   }
   else if ('*' == pattern[0])
      return false;

   typedef std::string::const_iterator itr_t;

   itr_t p_itr = pattern.begin();
   itr_t s_itr = str.begin();
   itr_t p_end = pattern.end();
   itr_t s_end = str.end();

   while ((s_end != s_itr) && (p_end != p_itr))
   {
      if ('*' == (*p_itr))
      {
         // '*' repeats the previous pattern character; a "**" run is invalid.
         const char_t target = static_cast<char>(std::toupper(*(p_itr - 1)));

         if ('*' == target)
         {
            diff_index = static_cast<std::size_t>(std::distance(str.begin(),s_itr));
            diff_value = static_cast<char>(std::toupper(*p_itr));

            return false;
         }
         else
            ++p_itr;

         // Consume as many occurrences of 'target' as the input provides.
         while (s_itr != s_end)
         {
            if (target != std::toupper(*s_itr))
               break;
            else
               ++s_itr;
         }

         continue;
      }
      else if ( ('?'
!= *p_itr) && std::toupper(*p_itr) != std::toupper(*s_itr) ) { diff_index = static_cast<std::size_t>(std::distance(str.begin(),s_itr)); diff_value = static_cast<char>(std::toupper(*p_itr)); return false; } ++p_itr; ++s_itr; } return ( (s_end == s_itr) && ( (p_end == p_itr) || ('*' == *p_itr) ) ); } static const double pow10[] = { 1.0, 1.0E+001, 1.0E+002, 1.0E+003, 1.0E+004, 1.0E+005, 1.0E+006, 1.0E+007, 1.0E+008, 1.0E+009, 1.0E+010, 1.0E+011, 1.0E+012, 1.0E+013, 1.0E+014, 1.0E+015, 1.0E+016 }; static const std::size_t pow10_size = sizeof(pow10) / sizeof(double); namespace numeric { namespace constant { static const double e = 2.71828182845904523536028747135266249775724709369996; static const double pi = 3.14159265358979323846264338327950288419716939937510; static const double pi_2 = 1.57079632679489661923132169163975144209858469968755; static const double pi_4 = 0.78539816339744830961566084581987572104929234984378; static const double pi_180 = 0.01745329251994329576923690768488612713442871888542; static const double _1_pi = 0.31830988618379067153776752674502872406891929148091; static const double _2_pi = 0.63661977236758134307553505349005744813783858296183; static const double _180_pi = 57.29577951308232087679815481410517033240547246656443; static const double log2 = 0.69314718055994530941723212145817656807550013436026; static const double sqrt2 = 1.41421356237309504880168872420969807856967187537695; } namespace details { struct unknown_type_tag { unknown_type_tag() {} }; struct real_type_tag { real_type_tag () {} }; struct complex_type_tag { complex_type_tag() {} }; struct int_type_tag { int_type_tag () {} }; template <typename T> struct number_type { typedef unknown_type_tag type; number_type() {} }; #define exprtk_register_real_type_tag(T) \ template<> struct number_type<T> \ { typedef real_type_tag type; number_type() {} }; \ #define exprtk_register_complex_type_tag(T) \ template<> struct number_type<std::complex<T> > \ { typedef complex_type_tag type; 
number_type() {} }; \ #define exprtk_register_int_type_tag(T) \ template<> struct number_type<T> \ { typedef int_type_tag type; number_type() {} }; \ exprtk_register_real_type_tag(double ) exprtk_register_real_type_tag(long double) exprtk_register_real_type_tag(float ) exprtk_register_complex_type_tag(double ) exprtk_register_complex_type_tag(long double) exprtk_register_complex_type_tag(float ) exprtk_register_int_type_tag(short ) exprtk_register_int_type_tag(int ) exprtk_register_int_type_tag(long long int ) exprtk_register_int_type_tag(unsigned short ) exprtk_register_int_type_tag(unsigned int ) exprtk_register_int_type_tag(unsigned long long int) #undef exprtk_register_real_type_tag #undef exprtk_register_int_type_tag template <typename T> struct epsilon_type { static inline T value() { const T epsilon = T(0.0000000001); return epsilon; } }; template <> struct epsilon_type <float> { static inline float value() { const float epsilon = float(0.000001f); return epsilon; } }; template <> struct epsilon_type <long double> { static inline long double value() { const long double epsilon = (long double)(0.000000000001); return epsilon; } }; template <typename T> inline bool is_nan_impl(const T v, real_type_tag) { return std::not_equal_to<T>()(v,v); } template <typename T> inline int to_int32_impl(const T v, real_type_tag) { return static_cast<int>(v); } template <typename T> inline long long int to_int64_impl(const T v, real_type_tag) { return static_cast<long long int>(v); } template <typename T> inline bool is_true_impl(const T v) { return std::not_equal_to<T>()(T(0),v); } template <typename T> inline bool is_false_impl(const T v) { return std::equal_to<T>()(T(0),v); } template <typename T> inline T abs_impl(const T v, real_type_tag) { return ((v < T(0)) ? 
-v : v); } template <typename T> inline T min_impl(const T v0, const T v1, real_type_tag) { return std::min<T>(v0,v1); } template <typename T> inline T max_impl(const T v0, const T v1, real_type_tag) { return std::max<T>(v0,v1); } template <typename T> inline T equal_impl(const T v0, const T v1, real_type_tag) { const T epsilon = epsilon_type<T>::value(); return (abs_impl(v0 - v1,real_type_tag()) <= (std::max(T(1),std::max(abs_impl(v0,real_type_tag()),abs_impl(v1,real_type_tag()))) * epsilon)) ? T(1) : T(0); } inline float equal_impl(const float v0, const float v1, real_type_tag) { const float epsilon = epsilon_type<float>::value(); return (abs_impl(v0 - v1,real_type_tag()) <= (std::max(1.0f,std::max(abs_impl(v0,real_type_tag()),abs_impl(v1,real_type_tag()))) * epsilon)) ? 1.0f : 0.0f; } template <typename T> inline T equal_impl(const T v0, const T v1, int_type_tag) { return (v0 == v1) ? 1 : 0; } template <typename T> inline T expm1_impl(const T v, real_type_tag) { // return std::expm1<T>(v); if (abs_impl(v,real_type_tag()) < T(0.00001)) return v + (T(0.5) * v * v); else return std::exp(v) - T(1); } template <typename T> inline T expm1_impl(const T v, int_type_tag) { return T(std::exp<double>(v)) - T(1); } template <typename T> inline T nequal_impl(const T v0, const T v1, real_type_tag) { typedef real_type_tag rtg; const T epsilon = epsilon_type<T>::value(); return (abs_impl(v0 - v1,rtg()) > (std::max(T(1),std::max(abs_impl(v0,rtg()),abs_impl(v1,rtg()))) * epsilon)) ? T(1) : T(0); } inline float nequal_impl(const float v0, const float v1, real_type_tag) { typedef real_type_tag rtg; const float epsilon = epsilon_type<float>::value(); return (abs_impl(v0 - v1,rtg()) > (std::max(1.0f,std::max(abs_impl(v0,rtg()),abs_impl(v1,rtg()))) * epsilon)) ? 1.0f : 0.0f; } template <typename T> inline T nequal_impl(const T v0, const T v1, int_type_tag) { return (v0 != v1) ? 
1 : 0; } template <typename T> inline T modulus_impl(const T v0, const T v1, real_type_tag) { return std::fmod(v0,v1); } template <typename T> inline T modulus_impl(const T v0, const T v1, int_type_tag) { return v0 % v1; } template <typename T> inline T pow_impl(const T v0, const T v1, real_type_tag) { return std::pow(v0,v1); } template <typename T> inline T pow_impl(const T v0, const T v1, int_type_tag) { return std::pow(static_cast<double>(v0),static_cast<double>(v1)); } template <typename T> inline T logn_impl(const T v0, const T v1, real_type_tag) { return std::log(v0) / std::log(v1); } template <typename T> inline T logn_impl(const T v0, const T v1, int_type_tag) { return static_cast<T>(logn_impl<double>(static_cast<double>(v0),static_cast<double>(v1),real_type_tag())); } template <typename T> inline T log1p_impl(const T v, real_type_tag) { if (v > T(-1)) { if (abs_impl(v,real_type_tag()) > T(0.0001)) { return std::log(T(1) + v); } else return (T(-0.5) * v + T(1)) * v; } else return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T log1p_impl(const T v, int_type_tag) { if (v > T(-1)) { return std::log(T(1) + v); } else return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T root_impl(const T v0, const T v1, real_type_tag) { if (v1 < T(0)) return std::numeric_limits<T>::quiet_NaN(); const std::size_t n = static_cast<std::size_t>(v1); if ((v0 < T(0)) && (0 == (n % 2))) return std::numeric_limits<T>::quiet_NaN(); return std::pow(v0, T(1) / n); } template <typename T> inline T root_impl(const T v0, const T v1, int_type_tag) { return root_impl<double>(static_cast<double>(v0),static_cast<double>(v1),real_type_tag()); } template <typename T> inline T round_impl(const T v, real_type_tag) { return ((v < T(0)) ? 
std::ceil(v - T(0.5)) : std::floor(v + T(0.5))); } template <typename T> inline T roundn_impl(const T v0, const T v1, real_type_tag) { const int index = std::max<int>(0, std::min<int>(pow10_size - 1, (int)std::floor(v1))); const T p10 = T(pow10[index]); if (v0 < T(0)) return T(std::ceil ((v0 * p10) - T(0.5)) / p10); else return T(std::floor((v0 * p10) + T(0.5)) / p10); } template <typename T> inline T roundn_impl(const T v0, const T, int_type_tag) { return v0; } template <typename T> inline T hypot_impl(const T v0, const T v1, real_type_tag) { return std::sqrt((v0 * v0) + (v1 * v1)); } template <typename T> inline T hypot_impl(const T v0, const T v1, int_type_tag) { return static_cast<T>(std::sqrt(static_cast<double>((v0 * v0) + (v1 * v1)))); } template <typename T> inline T atan2_impl(const T v0, const T v1, real_type_tag) { return std::atan2(v0,v1); } template <typename T> inline T atan2_impl(const T, const T, int_type_tag) { return 0; } template <typename T> inline T shr_impl(const T v0, const T v1, real_type_tag) { return v0 * (T(1) / std::pow(T(2),static_cast<T>(static_cast<int>(v1)))); } template <typename T> inline T shr_impl(const T v0, const T v1, int_type_tag) { return v0 >> v1; } template <typename T> inline T shl_impl(const T v0, const T v1, real_type_tag) { return v0 * std::pow(T(2),static_cast<T>(static_cast<int>(v1))); } template <typename T> inline T shl_impl(const T v0, const T v1, int_type_tag) { return v0 << v1; } template <typename T> inline T sgn_impl(const T v, real_type_tag) { if (v > T(0)) return T(+1); else if (v < T(0)) return T(-1); else return T( 0); } template <typename T> inline T sgn_impl(const T v, int_type_tag) { if (v > T(0)) return T(+1); else if (v < T(0)) return T(-1); else return T( 0); } template <typename T> inline T and_impl(const T v0, const T v1, real_type_tag) { return (is_true_impl(v0) && is_true_impl(v1)) ? 
T(1) : T(0); } template <typename T> inline T and_impl(const T v0, const T v1, int_type_tag) { return v0 && v1; } template <typename T> inline T nand_impl(const T v0, const T v1, real_type_tag) { return (is_false_impl(v0) || is_false_impl(v1)) ? T(1) : T(0); } template <typename T> inline T nand_impl(const T v0, const T v1, int_type_tag) { return !(v0 && v1); } template <typename T> inline T or_impl(const T v0, const T v1, real_type_tag) { return (is_true_impl(v0) || is_true_impl(v1)) ? T(1) : T(0); } template <typename T> inline T or_impl(const T v0, const T v1, int_type_tag) { return (v0 || v1); } template <typename T> inline T nor_impl(const T v0, const T v1, real_type_tag) { return (is_false_impl(v0) && is_false_impl(v1)) ? T(1) : T(0); } template <typename T> inline T nor_impl(const T v0, const T v1, int_type_tag) { return !(v0 || v1); } template <typename T> inline T xor_impl(const T v0, const T v1, real_type_tag) { return (is_false_impl(v0) != is_false_impl(v1)) ? T(1) : T(0); } template <typename T> inline T xor_impl(const T v0, const T v1, int_type_tag) { return v0 ^ v1; } template <typename T> inline T xnor_impl(const T v0, const T v1, real_type_tag) { const bool v0_true = is_true_impl(v0); const bool v1_true = is_true_impl(v1); if ((v0_true && v1_true) || (!v0_true && !v1_true)) return T(1); else return T(0); } template <typename T> inline T xnor_impl(const T v0, const T v1, int_type_tag) { const bool v0_true = is_true_impl(v0); const bool v1_true = is_true_impl(v1); if ((v0_true && v1_true) || (!v0_true && !v1_true)) return T(1); else return T(0); } #if (defined(_MSC_VER) && (_MSC_VER >= 1900)) || !defined(_MSC_VER) #define exprtk_define_erf(TT,impl) \ inline TT erf_impl(TT v) { return impl(v); } \ exprtk_define_erf( float,::erff) exprtk_define_erf( double,::erf ) exprtk_define_erf(long double,::erfl) #undef exprtk_define_erf #endif template <typename T> inline T erf_impl(T v, real_type_tag) { #if defined(_MSC_VER) && (_MSC_VER < 1900) // Credits: 
Abramowitz & Stegun Equations 7.1.25-28 static const T c[] = { T( 1.26551223), T(1.00002368), T( 0.37409196), T(0.09678418), T(-0.18628806), T(0.27886807), T(-1.13520398), T(1.48851587), T(-0.82215223), T(0.17087277) }; const T t = T(1) / (T(1) + T(0.5) * abs_impl(v,real_type_tag())); T result = T(1) - t * std::exp((-v * v) - c[0] + t * (c[1] + t * (c[2] + t * (c[3] + t * (c[4] + t * (c[5] + t * (c[6] + t * (c[7] + t * (c[8] + t * (c[9])))))))))); return (v >= T(0)) ? result : -result; #else return erf_impl(v); #endif } template <typename T> inline T erf_impl(T v, int_type_tag) { return erf_impl(static_cast<double>(v),real_type_tag()); } #if (defined(_MSC_VER) && (_MSC_VER >= 1900)) || !defined(_MSC_VER) #define exprtk_define_erfc(TT,impl) \ inline TT erfc_impl(TT v) { return impl(v); } \ exprtk_define_erfc( float,::erfcf) exprtk_define_erfc( double,::erfc ) exprtk_define_erfc(long double,::erfcl) #undef exprtk_define_erfc #endif template <typename T> inline T erfc_impl(T v, real_type_tag) { #if defined(_MSC_VER) && (_MSC_VER < 1900) return T(1) - erf_impl(v,real_type_tag()); #else return erfc_impl(v); #endif } template <typename T> inline T erfc_impl(T v, int_type_tag) { return erfc_impl(static_cast<double>(v),real_type_tag()); } template <typename T> inline T ncdf_impl(T v, real_type_tag) { T cnd = T(0.5) * (T(1) + erf_impl( abs_impl(v,real_type_tag()) / T(numeric::constant::sqrt2),real_type_tag())); return (v < T(0)) ? 
(T(1) - cnd) : cnd; } template <typename T> inline T ncdf_impl(T v, int_type_tag) { return ncdf_impl(static_cast<double>(v),real_type_tag()); } template <typename T> inline T sinc_impl(T v, real_type_tag) { if (std::abs(v) >= std::numeric_limits<T>::epsilon()) return(std::sin(v) / v); else return T(1); } template <typename T> inline T sinc_impl(T v, int_type_tag) { return sinc_impl(static_cast<double>(v),real_type_tag()); } template <typename T> inline T acos_impl(const T v, real_type_tag) { return std::acos (v); } template <typename T> inline T acosh_impl(const T v, real_type_tag) { return std::log(v + std::sqrt((v * v) - T(1))); } template <typename T> inline T asin_impl(const T v, real_type_tag) { return std::asin (v); } template <typename T> inline T asinh_impl(const T v, real_type_tag) { return std::log(v + std::sqrt((v * v) + T(1))); } template <typename T> inline T atan_impl(const T v, real_type_tag) { return std::atan (v); } template <typename T> inline T atanh_impl(const T v, real_type_tag) { return (std::log(T(1) + v) - std::log(T(1) - v)) / T(2); } template <typename T> inline T ceil_impl(const T v, real_type_tag) { return std::ceil (v); } template <typename T> inline T cos_impl(const T v, real_type_tag) { return std::cos (v); } template <typename T> inline T cosh_impl(const T v, real_type_tag) { return std::cosh (v); } template <typename T> inline T exp_impl(const T v, real_type_tag) { return std::exp (v); } template <typename T> inline T floor_impl(const T v, real_type_tag) { return std::floor(v); } template <typename T> inline T log_impl(const T v, real_type_tag) { return std::log (v); } template <typename T> inline T log10_impl(const T v, real_type_tag) { return std::log10(v); } template <typename T> inline T log2_impl(const T v, real_type_tag) { return std::log(v)/T(numeric::constant::log2); } template <typename T> inline T neg_impl(const T v, real_type_tag) { return -v; } template <typename T> inline T pos_impl(const T v, real_type_tag) { return 
+v; } template <typename T> inline T sin_impl(const T v, real_type_tag) { return std::sin (v); } template <typename T> inline T sinh_impl(const T v, real_type_tag) { return std::sinh (v); } template <typename T> inline T sqrt_impl(const T v, real_type_tag) { return std::sqrt (v); } template <typename T> inline T tan_impl(const T v, real_type_tag) { return std::tan (v); } template <typename T> inline T tanh_impl(const T v, real_type_tag) { return std::tanh (v); } template <typename T> inline T cot_impl(const T v, real_type_tag) { return T(1) / std::tan(v); } template <typename T> inline T sec_impl(const T v, real_type_tag) { return T(1) / std::cos(v); } template <typename T> inline T csc_impl(const T v, real_type_tag) { return T(1) / std::sin(v); } template <typename T> inline T r2d_impl(const T v, real_type_tag) { return (v * T(numeric::constant::_180_pi)); } template <typename T> inline T d2r_impl(const T v, real_type_tag) { return (v * T(numeric::constant::pi_180)); } template <typename T> inline T d2g_impl(const T v, real_type_tag) { return (v * T(20.0/9.0)); } template <typename T> inline T g2d_impl(const T v, real_type_tag) { return (v * T(9.0/20.0)); } template <typename T> inline T notl_impl(const T v, real_type_tag) { return (std::not_equal_to<T>()(T(0),v) ? T(0) : T(1)); } template <typename T> inline T frac_impl(const T v, real_type_tag) { return (v - static_cast<long long>(v)); } template <typename T> inline T trunc_impl(const T v, real_type_tag) { return T(static_cast<long long>(v)); } template <typename T> inline T const_pi_impl(real_type_tag) { return T(numeric::constant::pi); } template <typename T> inline T const_e_impl (real_type_tag) { return T(numeric::constant::e); } template <typename T> inline T abs_impl(const T v, int_type_tag) { return ((v >= T(0)) ? 
v : -v); } template <typename T> inline T exp_impl(const T v, int_type_tag) { return std::exp (v); } template <typename T> inline T log_impl(const T v, int_type_tag) { return std::log (v); } template <typename T> inline T log10_impl(const T v, int_type_tag) { return std::log10(v); } template <typename T> inline T log2_impl(const T v, int_type_tag) { return std::log(v)/T(numeric::constant::log2); } template <typename T> inline T neg_impl(const T v, int_type_tag) { return -v; } template <typename T> inline T pos_impl(const T v, int_type_tag) { return +v; } template <typename T> inline T ceil_impl(const T v, int_type_tag) { return v; } template <typename T> inline T floor_impl(const T v, int_type_tag) { return v; } template <typename T> inline T round_impl(const T v, int_type_tag) { return v; } template <typename T> inline T notl_impl(const T v, int_type_tag) { return !v; } template <typename T> inline T sqrt_impl(const T v, int_type_tag) { return std::sqrt (v); } template <typename T> inline T frac_impl(const T , int_type_tag) { return T(0); } template <typename T> inline T trunc_impl(const T v, int_type_tag) { return v; } template <typename T> inline T acos_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T acosh_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T asin_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T asinh_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T atan_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T atanh_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T cos_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T cosh_impl(const T , int_type_tag) { 
return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T sin_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T sinh_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T tan_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T tanh_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T cot_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T sec_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline T csc_impl(const T , int_type_tag) { return std::numeric_limits<T>::quiet_NaN(); } template <typename T> inline bool is_integer_impl(const T& v, real_type_tag) { return std::equal_to<T>()(T(0),std::fmod(v,T(1))); } template <typename T> inline bool is_integer_impl(const T&, int_type_tag) { return true; } } template <typename Type> struct numeric_info { enum { length = 0, size = 32, bound_length = 0, min_exp = 0, max_exp = 0 }; }; template<> struct numeric_info<int> { enum { length = 10, size = 16, bound_length = 9}; }; template<> struct numeric_info<float> { enum { min_exp = -38, max_exp = +38}; }; template<> struct numeric_info<double> { enum { min_exp = -308, max_exp = +308}; }; template<> struct numeric_info<long double> { enum { min_exp = -308, max_exp = +308}; }; template <typename T> inline int to_int32(const T v) { const typename details::number_type<T>::type num_type; return to_int32_impl(v, num_type); } template <typename T> inline long long int to_int64(const T v) { const typename details::number_type<T>::type num_type; return to_int64_impl(v, num_type); } template <typename T> inline bool is_nan(const T v) { const typename details::number_type<T>::type num_type; return is_nan_impl(v, num_type); } template <typename T> 
// --------------------------------------------------------------------------
// Public binary numeric wrappers. Each one constructs the tag type
// (real_type_tag / int_type_tag / complex_type_tag) associated with T via
// details::number_type<T> and forwards to the matching *_impl overload,
// selecting the right implementation at compile time.
// (The leading "template <typename T>" for min() is on the previous line of
// the original file.)
// --------------------------------------------------------------------------
inline T min(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return min_impl(v0, v1, num_type);
}

template <typename T>
inline T max(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return max_impl(v0, v1, num_type);
}

// Approximate equality for reals (epsilon based), exact for integers.
template <typename T>
inline T equal(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return equal_impl(v0, v1, num_type);
}

template <typename T>
inline T nequal(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return nequal_impl(v0, v1, num_type);
}

// fmod for reals, '%' for integers.
template <typename T>
inline T modulus(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return modulus_impl(v0, v1, num_type);
}

template <typename T>
inline T pow(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return pow_impl(v0, v1, num_type);
}

// Logarithm of v0 in base v1.
template <typename T>
inline T logn(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return logn_impl(v0, v1, num_type);
}

// v1-th root of v0.
template <typename T>
inline T root(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return root_impl(v0, v1, num_type);
}

// Round v0 to v1 decimal places.
template <typename T>
inline T roundn(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return roundn_impl(v0, v1, num_type);
}

template <typename T>
inline T hypot(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return hypot_impl(v0, v1, num_type);
}

template <typename T>
inline T atan2(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return atan2_impl(v0, v1, num_type);
}

// Shift right: '>>' for integers, division by 2^v1 for reals.
template <typename T>
inline T shr(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return shr_impl(v0, v1, num_type);
}

// Shift left: '<<' for integers, multiplication by 2^v1 for reals
// (call continues on the next line of the original file).
template <typename T>
inline T shl(const T v0, const T v1)
{
   const typename details::number_type<T>::type num_type;
   return shl_impl(v0, v1,
num_type); } template <typename T> inline T and_opr(const T v0, const T v1) { const typename details::number_type<T>::type num_type; return and_impl(v0, v1, num_type); } template <typename T> inline T nand_opr(const T v0, const T v1) { const typename details::number_type<T>::type num_type; return nand_impl(v0, v1, num_type); } template <typename T> inline T or_opr(const T v0, const T v1) { const typename details::number_type<T>::type num_type; return or_impl(v0, v1, num_type); } template <typename T> inline T nor_opr(const T v0, const T v1) { const typename details::number_type<T>::type num_type; return nor_impl(v0, v1, num_type); } template <typename T> inline T xor_opr(const T v0, const T v1) { const typename details::number_type<T>::type num_type; return xor_impl(v0, v1, num_type); } template <typename T> inline T xnor_opr(const T v0, const T v1) { const typename details::number_type<T>::type num_type; return xnor_impl(v0, v1, num_type); } template <typename T> inline bool is_integer(const T v) { const typename details::number_type<T>::type num_type; return is_integer_impl(v, num_type); } template <typename T, unsigned int N> struct fast_exp { static inline T result(T v) { unsigned int k = N; T l = T(1); while (k) { if (k & 1) { l *= v; --k; } v *= v; k >>= 1; } return l; } }; template <typename T> struct fast_exp<T,10> { static inline T result(T v) { T v_5 = fast_exp<T,5>::result(v); return v_5 * v_5; } }; template <typename T> struct fast_exp<T, 9> { static inline T result(T v) { return fast_exp<T,8>::result(v) * v; } }; template <typename T> struct fast_exp<T, 8> { static inline T result(T v) { T v_4 = fast_exp<T,4>::result(v); return v_4 * v_4; } }; template <typename T> struct fast_exp<T, 7> { static inline T result(T v) { return fast_exp<T,6>::result(v) * v; } }; template <typename T> struct fast_exp<T, 6> { static inline T result(T v) { T v_3 = fast_exp<T,3>::result(v); return v_3 * v_3; } }; template <typename T> struct fast_exp<T, 5> { static inline T 
result(T v) { return fast_exp<T,4>::result(v) * v; } }; template <typename T> struct fast_exp<T, 4> { static inline T result(T v) { T v_2 = v * v; return v_2 * v_2; } }; template <typename T> struct fast_exp<T, 3> { static inline T result(T v) { return v * v * v; } }; template <typename T> struct fast_exp<T, 2> { static inline T result(T v) { return v * v; } }; template <typename T> struct fast_exp<T, 1> { static inline T result(T v) { return v; } }; template <typename T> struct fast_exp<T, 0> { static inline T result(T ) { return T(1); } }; #define exprtk_define_unary_function(FunctionName) \ template <typename T> \ inline T FunctionName (const T v) \ { \ const typename details::number_type<T>::type num_type; \ return FunctionName##_impl(v,num_type); \ } \ exprtk_define_unary_function(abs ) exprtk_define_unary_function(acos ) exprtk_define_unary_function(acosh) exprtk_define_unary_function(asin ) exprtk_define_unary_function(asinh) exprtk_define_unary_function(atan ) exprtk_define_unary_function(atanh) exprtk_define_unary_function(ceil ) exprtk_define_unary_function(cos ) exprtk_define_unary_function(cosh ) exprtk_define_unary_function(exp ) exprtk_define_unary_function(expm1) exprtk_define_unary_function(floor) exprtk_define_unary_function(log ) exprtk_define_unary_function(log10) exprtk_define_unary_function(log2 ) exprtk_define_unary_function(log1p) exprtk_define_unary_function(neg ) exprtk_define_unary_function(pos ) exprtk_define_unary_function(round) exprtk_define_unary_function(sin ) exprtk_define_unary_function(sinc ) exprtk_define_unary_function(sinh ) exprtk_define_unary_function(sqrt ) exprtk_define_unary_function(tan ) exprtk_define_unary_function(tanh ) exprtk_define_unary_function(cot ) exprtk_define_unary_function(sec ) exprtk_define_unary_function(csc ) exprtk_define_unary_function(r2d ) exprtk_define_unary_function(d2r ) exprtk_define_unary_function(d2g ) exprtk_define_unary_function(g2d ) exprtk_define_unary_function(notl ) 
exprtk_define_unary_function(sgn ) exprtk_define_unary_function(erf ) exprtk_define_unary_function(erfc ) exprtk_define_unary_function(ncdf ) exprtk_define_unary_function(frac ) exprtk_define_unary_function(trunc) #undef exprtk_define_unary_function } template <typename T> inline T compute_pow10(T d, const int exponent) { static const double fract10[] = { 0.0, 1.0E+001, 1.0E+002, 1.0E+003, 1.0E+004, 1.0E+005, 1.0E+006, 1.0E+007, 1.0E+008, 1.0E+009, 1.0E+010, 1.0E+011, 1.0E+012, 1.0E+013, 1.0E+014, 1.0E+015, 1.0E+016, 1.0E+017, 1.0E+018, 1.0E+019, 1.0E+020, 1.0E+021, 1.0E+022, 1.0E+023, 1.0E+024, 1.0E+025, 1.0E+026, 1.0E+027, 1.0E+028, 1.0E+029, 1.0E+030, 1.0E+031, 1.0E+032, 1.0E+033, 1.0E+034, 1.0E+035, 1.0E+036, 1.0E+037, 1.0E+038, 1.0E+039, 1.0E+040, 1.0E+041, 1.0E+042, 1.0E+043, 1.0E+044, 1.0E+045, 1.0E+046, 1.0E+047, 1.0E+048, 1.0E+049, 1.0E+050, 1.0E+051, 1.0E+052, 1.0E+053, 1.0E+054, 1.0E+055, 1.0E+056, 1.0E+057, 1.0E+058, 1.0E+059, 1.0E+060, 1.0E+061, 1.0E+062, 1.0E+063, 1.0E+064, 1.0E+065, 1.0E+066, 1.0E+067, 1.0E+068, 1.0E+069, 1.0E+070, 1.0E+071, 1.0E+072, 1.0E+073, 1.0E+074, 1.0E+075, 1.0E+076, 1.0E+077, 1.0E+078, 1.0E+079, 1.0E+080, 1.0E+081, 1.0E+082, 1.0E+083, 1.0E+084, 1.0E+085, 1.0E+086, 1.0E+087, 1.0E+088, 1.0E+089, 1.0E+090, 1.0E+091, 1.0E+092, 1.0E+093, 1.0E+094, 1.0E+095, 1.0E+096, 1.0E+097, 1.0E+098, 1.0E+099, 1.0E+100, 1.0E+101, 1.0E+102, 1.0E+103, 1.0E+104, 1.0E+105, 1.0E+106, 1.0E+107, 1.0E+108, 1.0E+109, 1.0E+110, 1.0E+111, 1.0E+112, 1.0E+113, 1.0E+114, 1.0E+115, 1.0E+116, 1.0E+117, 1.0E+118, 1.0E+119, 1.0E+120, 1.0E+121, 1.0E+122, 1.0E+123, 1.0E+124, 1.0E+125, 1.0E+126, 1.0E+127, 1.0E+128, 1.0E+129, 1.0E+130, 1.0E+131, 1.0E+132, 1.0E+133, 1.0E+134, 1.0E+135, 1.0E+136, 1.0E+137, 1.0E+138, 1.0E+139, 1.0E+140, 1.0E+141, 1.0E+142, 1.0E+143, 1.0E+144, 1.0E+145, 1.0E+146, 1.0E+147, 1.0E+148, 1.0E+149, 1.0E+150, 1.0E+151, 1.0E+152, 1.0E+153, 1.0E+154, 1.0E+155, 1.0E+156, 1.0E+157, 1.0E+158, 1.0E+159, 1.0E+160, 1.0E+161, 1.0E+162, 1.0E+163, 
1.0E+164, 1.0E+165, 1.0E+166, 1.0E+167, 1.0E+168, 1.0E+169, 1.0E+170, 1.0E+171, 1.0E+172, 1.0E+173, 1.0E+174, 1.0E+175, 1.0E+176, 1.0E+177, 1.0E+178, 1.0E+179, 1.0E+180, 1.0E+181, 1.0E+182, 1.0E+183, 1.0E+184, 1.0E+185, 1.0E+186, 1.0E+187, 1.0E+188, 1.0E+189, 1.0E+190, 1.0E+191, 1.0E+192, 1.0E+193, 1.0E+194, 1.0E+195, 1.0E+196, 1.0E+197, 1.0E+198, 1.0E+199, 1.0E+200, 1.0E+201, 1.0E+202, 1.0E+203, 1.0E+204, 1.0E+205, 1.0E+206, 1.0E+207, 1.0E+208, 1.0E+209, 1.0E+210, 1.0E+211, 1.0E+212, 1.0E+213, 1.0E+214, 1.0E+215, 1.0E+216, 1.0E+217, 1.0E+218, 1.0E+219, 1.0E+220, 1.0E+221, 1.0E+222, 1.0E+223, 1.0E+224, 1.0E+225, 1.0E+226, 1.0E+227, 1.0E+228, 1.0E+229, 1.0E+230, 1.0E+231, 1.0E+232, 1.0E+233, 1.0E+234, 1.0E+235, 1.0E+236, 1.0E+237, 1.0E+238, 1.0E+239, 1.0E+240, 1.0E+241, 1.0E+242, 1.0E+243, 1.0E+244, 1.0E+245, 1.0E+246, 1.0E+247, 1.0E+248, 1.0E+249, 1.0E+250, 1.0E+251, 1.0E+252, 1.0E+253, 1.0E+254, 1.0E+255, 1.0E+256, 1.0E+257, 1.0E+258, 1.0E+259, 1.0E+260, 1.0E+261, 1.0E+262, 1.0E+263, 1.0E+264, 1.0E+265, 1.0E+266, 1.0E+267, 1.0E+268, 1.0E+269, 1.0E+270, 1.0E+271, 1.0E+272, 1.0E+273, 1.0E+274, 1.0E+275, 1.0E+276, 1.0E+277, 1.0E+278, 1.0E+279, 1.0E+280, 1.0E+281, 1.0E+282, 1.0E+283, 1.0E+284, 1.0E+285, 1.0E+286, 1.0E+287, 1.0E+288, 1.0E+289, 1.0E+290, 1.0E+291, 1.0E+292, 1.0E+293, 1.0E+294, 1.0E+295, 1.0E+296, 1.0E+297, 1.0E+298, 1.0E+299, 1.0E+300, 1.0E+301, 1.0E+302, 1.0E+303, 1.0E+304, 1.0E+305, 1.0E+306, 1.0E+307, 1.0E+308 }; static const int fract10_size = static_cast<int>(sizeof(fract10) / sizeof(double)); const int e = std::abs(exponent); if (exponent >= std::numeric_limits<T>::min_exponent10) { if (e < fract10_size) { if (exponent > 0) return T(d * fract10[e]); else return T(d / fract10[e]); } else return T(d * std::pow(10.0, 10.0 * exponent)); } else { d /= T(fract10[ -std::numeric_limits<T>::min_exponent10]); return T(d / fract10[-exponent + std::numeric_limits<T>::min_exponent10]); } } template <typename Iterator, typename T> inline bool 
string_to_type_converter_impl_ref(Iterator& itr, const Iterator end, T& result) { if (itr == end) return false; const bool negative = ('-' == (*itr)); if (negative || ('+' == (*itr))) { if (end == ++itr) return false; } static const uchar_t zero = static_cast<uchar_t>('0'); while ((end != itr) && (zero == (*itr))) ++itr; bool return_result = true; unsigned int digit = 0; const std::size_t length = static_cast<std::size_t>(std::distance(itr,end)); if (length <= 4) { exprtk_disable_fallthrough_begin switch (length) { #ifdef exprtk_use_lut #define exprtk_process_digit \ if ((digit = details::digit_table[(int)*itr++]) < 10) \ result = result * 10 + (digit); \ else \ { \ return_result = false; \ break; \ } \ #else #define exprtk_process_digit \ if ((digit = (*itr++ - zero)) < 10) \ result = result * T(10) + digit; \ else \ { \ return_result = false; \ break; \ } \ #endif case 4 : exprtk_process_digit case 3 : exprtk_process_digit case 2 : exprtk_process_digit case 1 : if ((digit = (*itr - zero))>= 10) { digit = 0; return_result = false; } #undef exprtk_process_digit } exprtk_disable_fallthrough_end } else return_result = false; if (length && return_result) { result = result * 10 + static_cast<T>(digit); ++itr; } result = negative ? 
-result : result; return return_result; } template <typename Iterator, typename T> static inline bool parse_nan(Iterator& itr, const Iterator end, T& t) { typedef typename std::iterator_traits<Iterator>::value_type type; static const std::size_t nan_length = 3; if (std::distance(itr,end) != static_cast<int>(nan_length)) return false; if (static_cast<type>('n') == (*itr)) { if ( (static_cast<type>('a') != *(itr + 1)) || (static_cast<type>('n') != *(itr + 2)) ) { return false; } } else if ( (static_cast<type>('A') != *(itr + 1)) || (static_cast<type>('N') != *(itr + 2)) ) { return false; } t = std::numeric_limits<T>::quiet_NaN(); return true; } template <typename Iterator, typename T> static inline bool parse_inf(Iterator& itr, const Iterator end, T& t, bool negative) { static const char_t inf_uc[] = "INFINITY"; static const char_t inf_lc[] = "infinity"; static const std::size_t inf_length = 8; const std::size_t length = static_cast<std::size_t>(std::distance(itr,end)); if ((3 != length) && (inf_length != length)) return false; char_cptr inf_itr = ('i' == (*itr)) ? 
inf_lc : inf_uc; while (end != itr) { if (*inf_itr == static_cast<char>(*itr)) { ++itr; ++inf_itr; continue; } else return false; } if (negative) t = -std::numeric_limits<T>::infinity(); else t = std::numeric_limits<T>::infinity(); return true; } template <typename Iterator, typename T> inline bool string_to_real(Iterator& itr_external, const Iterator end, T& t, numeric::details::real_type_tag) { if (end == itr_external) return false; Iterator itr = itr_external; T d = T(0); const bool negative = ('-' == (*itr)); if (negative || '+' == (*itr)) { if (end == ++itr) return false; } bool instate = false; static const char zero = static_cast<uchar_t>('0'); #define parse_digit_1(d) \ if ((digit = (*itr - zero)) < 10) \ { d = d * T(10) + digit; } \ else \ { break; } \ if (end == ++itr) break; \ #define parse_digit_2(d) \ if ((digit = (*itr - zero)) < 10) \ { d = d * T(10) + digit; } \ else { break; } \ ++itr; \ if ('.' != (*itr)) { const Iterator curr = itr; while ((end != itr) && (zero == (*itr))) ++itr; unsigned int digit; while (end != itr) { // Note: For 'physical' superscalar architectures it // is advised that the following loop be: 4xPD1 and 1xPD2 #ifdef exprtk_enable_superscalar parse_digit_1(d) parse_digit_1(d) #endif parse_digit_1(d) parse_digit_1(d) parse_digit_2(d) } if (curr != itr) instate = true; } int exponent = 0; if (end != itr) { if ('.' 
== (*itr)) { const Iterator curr = ++itr; unsigned int digit; T tmp_d = T(0); while (end != itr) { #ifdef exprtk_enable_superscalar parse_digit_1(tmp_d) parse_digit_1(tmp_d) parse_digit_1(tmp_d) #endif parse_digit_1(tmp_d) parse_digit_1(tmp_d) parse_digit_2(tmp_d) } if (curr != itr) { instate = true; d += compute_pow10(tmp_d,static_cast<int>(-std::distance(curr,itr))); } #undef parse_digit_1 #undef parse_digit_2 } if (end != itr) { typename std::iterator_traits<Iterator>::value_type c = (*itr); if (('e' == c) || ('E' == c)) { int exp = 0; if (!details::string_to_type_converter_impl_ref(++itr, end, exp)) { if (end == itr) return false; else c = (*itr); } exponent += exp; } if (end != itr) { if (('f' == c) || ('F' == c) || ('l' == c) || ('L' == c)) ++itr; else if ('#' == c) { if (end == ++itr) return false; else if (('I' <= (*itr)) && ((*itr) <= 'n')) { if (('i' == (*itr)) || ('I' == (*itr))) { return parse_inf(itr, end, t, negative); } else if (('n' == (*itr)) || ('N' == (*itr))) { return parse_nan(itr, end, t); } else return false; } else return false; } else if (('I' <= (*itr)) && ((*itr) <= 'n')) { if (('i' == (*itr)) || ('I' == (*itr))) { return parse_inf(itr, end, t, negative); } else if (('n' == (*itr)) || ('N' == (*itr))) { return parse_nan(itr, end, t); } else return false; } else return false; } } } if ((end != itr) || (!instate)) return false; else if (exponent) d = compute_pow10(d,exponent); t = static_cast<T>((negative) ? -d : d); return true; } template <typename T> inline bool string_to_real(const std::string& s, T& t) { const typename numeric::details::number_type<T>::type num_type; char_cptr begin = s.data(); char_cptr end = s.data() + s.size(); return string_to_real(begin, end, t, num_type); } template <typename T> struct functor_t { /* Note: The following definitions for Type, may require tweaking based on the compiler and target architecture. The benchmark should provide enough information to make the right choice. 
*/ //typedef T Type; //typedef const T Type; typedef const T& Type; typedef T& RefType; typedef T (*qfunc_t)(Type t0, Type t1, Type t2, Type t3); typedef T (*tfunc_t)(Type t0, Type t1, Type t2); typedef T (*bfunc_t)(Type t0, Type t1); typedef T (*ufunc_t)(Type t0); }; } // namespace details namespace lexer { struct token { enum token_type { e_none = 0, e_error = 1, e_err_symbol = 2, e_err_number = 3, e_err_string = 4, e_err_sfunc = 5, e_eof = 6, e_number = 7, e_symbol = 8, e_string = 9, e_assign = 10, e_addass = 11, e_subass = 12, e_mulass = 13, e_divass = 14, e_modass = 15, e_shr = 16, e_shl = 17, e_lte = 18, e_ne = 19, e_gte = 20, e_swap = 21, e_lt = '<', e_gt = '>', e_eq = '=', e_rbracket = ')', e_lbracket = '(', e_rsqrbracket = ']', e_lsqrbracket = '[', e_rcrlbracket = '}', e_lcrlbracket = '{', e_comma = ',', e_add = '+', e_sub = '-', e_div = '/', e_mul = '*', e_mod = '%', e_pow = '^', e_colon = ':', e_ternary = '?' }; token() : type(e_none), value(""), position(std::numeric_limits<std::size_t>::max()) {} void clear() { type = e_none; value = ""; position = std::numeric_limits<std::size_t>::max(); } template <typename Iterator> inline token& set_operator(const token_type tt, const Iterator begin, const Iterator end, const Iterator base_begin = Iterator(0)) { type = tt; value.assign(begin,end); if (base_begin) position = static_cast<std::size_t>(std::distance(base_begin,begin)); return (*this); } template <typename Iterator> inline token& set_symbol(const Iterator begin, const Iterator end, const Iterator base_begin = Iterator(0)) { type = e_symbol; value.assign(begin,end); if (base_begin) position = static_cast<std::size_t>(std::distance(base_begin,begin)); return (*this); } inline token& set_symbol(const std::string& s, const std::size_t p) { type = e_symbol; value = s; position = p; return (*this); } template <typename Iterator> inline token& set_numeric(const Iterator begin, const Iterator end, const Iterator base_begin = Iterator(0)) { type = e_number; 
value.assign(begin,end); if (base_begin) position = static_cast<std::size_t>(std::distance(base_begin,begin)); return (*this); } template <typename Iterator> inline token& set_string(const Iterator begin, const Iterator end, const Iterator base_begin = Iterator(0)) { type = e_string; value.assign(begin,end); if (base_begin) position = static_cast<std::size_t>(std::distance(base_begin,begin)); return (*this); } inline token& set_string(const std::string& s, const std::size_t p) { type = e_string; value = s; position = p; return (*this); } template <typename Iterator> inline token& set_error(const token_type et, const Iterator begin, const Iterator end, const Iterator base_begin = Iterator(0)) { if ( (e_error == et) || (e_err_symbol == et) || (e_err_number == et) || (e_err_string == et) || (e_err_sfunc == et) ) { type = et; } else type = e_error; value.assign(begin,end); if (base_begin) position = static_cast<std::size_t>(std::distance(base_begin,begin)); return (*this); } static inline std::string to_str(token_type t) { switch (t) { case e_none : return "NONE"; case e_error : return "ERROR"; case e_err_symbol : return "ERROR_SYMBOL"; case e_err_number : return "ERROR_NUMBER"; case e_err_string : return "ERROR_STRING"; case e_eof : return "EOF"; case e_number : return "NUMBER"; case e_symbol : return "SYMBOL"; case e_string : return "STRING"; case e_assign : return ":="; case e_addass : return "+="; case e_subass : return "-="; case e_mulass : return "*="; case e_divass : return "/="; case e_modass : return "%="; case e_shr : return ">>"; case e_shl : return "<<"; case e_lte : return "<="; case e_ne : return "!="; case e_gte : return ">="; case e_lt : return "<"; case e_gt : return ">"; case e_eq : return "="; case e_rbracket : return ")"; case e_lbracket : return "("; case e_rsqrbracket : return "]"; case e_lsqrbracket : return "["; case e_rcrlbracket : return "}"; case e_lcrlbracket : return "{"; case e_comma : return ","; case e_add : return "+"; case e_sub : 
return "-"; case e_div : return "/"; case e_mul : return "*"; case e_mod : return "%"; case e_pow : return "^"; case e_colon : return ":"; case e_ternary : return "?"; case e_swap : return "<=>"; default : return "UNKNOWN"; } } inline bool is_error() const { return ( (e_error == type) || (e_err_symbol == type) || (e_err_number == type) || (e_err_string == type) || (e_err_sfunc == type) ); } token_type type; std::string value; std::size_t position; }; class generator { public: typedef token token_t; typedef std::vector<token_t> token_list_t; typedef std::vector<token_t>::iterator token_list_itr_t; typedef details::char_t char_t; generator() : base_itr_(0), s_itr_ (0), s_end_ (0) { clear(); } inline void clear() { base_itr_ = 0; s_itr_ = 0; s_end_ = 0; token_list_.clear(); token_itr_ = token_list_.end(); store_token_itr_ = token_list_.end(); } inline bool process(const std::string& str) { base_itr_ = str.data(); s_itr_ = str.data(); s_end_ = str.data() + str.size(); eof_token_.set_operator(token_t::e_eof,s_end_,s_end_,base_itr_); token_list_.clear(); while (!is_end(s_itr_)) { scan_token(); if (!token_list_.empty() && token_list_.back().is_error()) return false; } return true; } inline bool empty() const { return token_list_.empty(); } inline std::size_t size() const { return token_list_.size(); } inline void begin() { token_itr_ = token_list_.begin(); store_token_itr_ = token_list_.begin(); } inline void store() { store_token_itr_ = token_itr_; } inline void restore() { token_itr_ = store_token_itr_; } inline token_t& next_token() { if (token_list_.end() != token_itr_) { return *token_itr_++; } else return eof_token_; } inline token_t& peek_next_token() { if (token_list_.end() != token_itr_) { return *token_itr_; } else return eof_token_; } inline token_t& operator[](const std::size_t& index) { if (index < token_list_.size()) return token_list_[index]; else return eof_token_; } inline token_t operator[](const std::size_t& index) const { if (index < 
token_list_.size()) return token_list_[index]; else return eof_token_; } inline bool finished() const { return (token_list_.end() == token_itr_); } inline void insert_front(token_t::token_type tk_type) { if ( !token_list_.empty() && (token_list_.end() != token_itr_) ) { token_t t = *token_itr_; t.type = tk_type; token_itr_ = token_list_.insert(token_itr_,t); } } inline std::string substr(const std::size_t& begin, const std::size_t& end) { const details::char_cptr begin_itr = ((base_itr_ + begin) < s_end_) ? (base_itr_ + begin) : s_end_; const details::char_cptr end_itr = ((base_itr_ + end) < s_end_) ? (base_itr_ + end) : s_end_; return std::string(begin_itr,end_itr); } inline std::string remaining() const { if (finished()) return ""; else if (token_list_.begin() != token_itr_) return std::string(base_itr_ + (token_itr_ - 1)->position,s_end_); else return std::string(base_itr_ + token_itr_->position,s_end_); } private: inline bool is_end(details::char_cptr itr) { return (s_end_ == itr); } inline bool is_comment_start(details::char_cptr itr) { #ifndef exprtk_disable_comments const char_t c0 = *(itr + 0); const char_t c1 = *(itr + 1); if ('#' == c0) return true; else if (!is_end(itr + 1)) { if (('/' == c0) && ('/' == c1)) return true; if (('/' == c0) && ('*' == c1)) return true; } #endif return false; } inline void skip_whitespace() { while (!is_end(s_itr_) && details::is_whitespace(*s_itr_)) { ++s_itr_; } } inline void skip_comments() { #ifndef exprtk_disable_comments // The following comment styles are supported: // 1. // .... \n // 2. # .... \n // 3. /* .... 
*/ struct test { static inline bool comment_start(const char_t c0, const char_t c1, int& mode, int& incr) { mode = 0; if ('#' == c0) { mode = 1; incr = 1; } else if ('/' == c0) { if ('/' == c1) { mode = 1; incr = 2; } else if ('*' == c1) { mode = 2; incr = 2; } } return (0 != mode); } static inline bool comment_end(const char_t c0, const char_t c1, int& mode) { if ( ((1 == mode) && ('\n' == c0)) || ((2 == mode) && ( '*' == c0) && ('/' == c1)) ) { mode = 0; return true; } else return false; } }; int mode = 0; int increment = 0; if (is_end(s_itr_)) return; else if (!test::comment_start(*s_itr_, *(s_itr_ + 1), mode, increment)) return; details::char_cptr cmt_start = s_itr_; s_itr_ += increment; while (!is_end(s_itr_)) { if ((1 == mode) && test::comment_end(*s_itr_, 0, mode)) { ++s_itr_; return; } if ((2 == mode)) { if (!is_end((s_itr_ + 1)) && test::comment_end(*s_itr_, *(s_itr_ + 1), mode)) { s_itr_ += 2; return; } } ++s_itr_; } if (2 == mode) { token_t t; t.set_error(token::e_error, cmt_start, cmt_start + mode, base_itr_); token_list_.push_back(t); } #endif } inline void scan_token() { if (details::is_whitespace(*s_itr_)) { skip_whitespace(); return; } else if (is_comment_start(s_itr_)) { skip_comments(); return; } else if (details::is_operator_char(*s_itr_)) { scan_operator(); return; } else if (details::is_letter(*s_itr_)) { scan_symbol(); return; } else if (details::is_digit((*s_itr_)) || ('.' 
== (*s_itr_))) { scan_number(); return; } else if ('$' == (*s_itr_)) { scan_special_function(); return; } #ifndef exprtk_disable_string_capabilities else if ('\'' == (*s_itr_)) { scan_string(); return; } #endif else if ('~' == (*s_itr_)) { token_t t; t.set_symbol(s_itr_, s_itr_ + 1, base_itr_); token_list_.push_back(t); ++s_itr_; return; } else { token_t t; t.set_error(token::e_error, s_itr_, s_itr_ + 2, base_itr_); token_list_.push_back(t); ++s_itr_; } } inline void scan_operator() { token_t t; const char_t c0 = s_itr_[0]; if (!is_end(s_itr_ + 1)) { const char_t c1 = s_itr_[1]; if (!is_end(s_itr_ + 2)) { const char_t c2 = s_itr_[2]; if ((c0 == '<') && (c1 == '=') && (c2 == '>')) { t.set_operator(token_t::e_swap, s_itr_, s_itr_ + 3, base_itr_); token_list_.push_back(t); s_itr_ += 3; return; } } token_t::token_type ttype = token_t::e_none; if ((c0 == '<') && (c1 == '=')) ttype = token_t::e_lte; else if ((c0 == '>') && (c1 == '=')) ttype = token_t::e_gte; else if ((c0 == '<') && (c1 == '>')) ttype = token_t::e_ne; else if ((c0 == '!') && (c1 == '=')) ttype = token_t::e_ne; else if ((c0 == '=') && (c1 == '=')) ttype = token_t::e_eq; else if ((c0 == ':') && (c1 == '=')) ttype = token_t::e_assign; else if ((c0 == '<') && (c1 == '<')) ttype = token_t::e_shl; else if ((c0 == '>') && (c1 == '>')) ttype = token_t::e_shr; else if ((c0 == '+') && (c1 == '=')) ttype = token_t::e_addass; else if ((c0 == '-') && (c1 == '=')) ttype = token_t::e_subass; else if ((c0 == '*') && (c1 == '=')) ttype = token_t::e_mulass; else if ((c0 == '/') && (c1 == '=')) ttype = token_t::e_divass; else if ((c0 == '%') && (c1 == '=')) ttype = token_t::e_modass; if (token_t::e_none != ttype) { t.set_operator(ttype, s_itr_, s_itr_ + 2, base_itr_); token_list_.push_back(t); s_itr_ += 2; return; } } if ('<' == c0) t.set_operator(token_t::e_lt , s_itr_, s_itr_ + 1, base_itr_); else if ('>' == c0) t.set_operator(token_t::e_gt , s_itr_, s_itr_ + 1, base_itr_); else if (';' == c0) 
t.set_operator(token_t::e_eof, s_itr_, s_itr_ + 1, base_itr_); else if ('&' == c0) t.set_symbol(s_itr_, s_itr_ + 1, base_itr_); else if ('|' == c0) t.set_symbol(s_itr_, s_itr_ + 1, base_itr_); else t.set_operator(token_t::token_type(c0), s_itr_, s_itr_ + 1, base_itr_); token_list_.push_back(t); ++s_itr_; } inline void scan_symbol() { details::char_cptr initial_itr = s_itr_; bool escaped = false; bool has_escape = false; while (!is_end(s_itr_)) { if ('\\' == (*s_itr_)) { escaped = true; has_escape = true; } else if (escaped) { escaped = false; } else if (!details::is_letter_or_digit(*s_itr_) && ('_' != (*s_itr_))) { if ('.' != (*s_itr_)) break; /* Permit symbols that contain a 'dot' Allowed : abc.xyz, a123.xyz, abc.123, abc_.xyz a123_.xyz abc._123 Disallowed: .abc, abc.<white-space>, abc.<eof>, abc.<operator +,-,*,/...> */ if ( (s_itr_ != initial_itr) && !is_end(s_itr_ + 1) && !details::is_letter_or_digit(*(s_itr_ + 1)) && ('_' != (*(s_itr_ + 1))) ) break; } ++s_itr_; } token_t t; if (!has_escape) t.set_symbol(initial_itr,s_itr_,base_itr_); else { std::string parsed_string(initial_itr,s_itr_); details::cleanup_escapes(parsed_string); t.set_symbol(parsed_string, static_cast<std::size_t>(std::distance(base_itr_,initial_itr))); } token_list_.push_back(t); } inline void scan_number() { /* Attempt to match a valid numeric value in one of the following formats: (01) 123456 (02) 123456. (03) 123.456 (04) 123.456e3 (05) 123.456E3 (06) 123.456e+3 (07) 123.456E+3 (08) 123.456e-3 (09) 123.456E-3 (00) .1234 (11) .1234e3 (12) .1234E+3 (13) .1234e+3 (14) .1234E-3 (15) .1234e-3 */ details::char_cptr initial_itr = s_itr_; bool dot_found = false; bool e_found = false; bool post_e_sign_found = false; bool post_e_digit_found = false; token_t t; while (!is_end(s_itr_)) { if ('.' 
== (*s_itr_)) { if (dot_found) { t.set_error(token::e_err_number, initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } dot_found = true; ++s_itr_; continue; } else if ('e' == std::tolower(*s_itr_)) { const char_t& c = *(s_itr_ + 1); if (is_end(s_itr_ + 1)) { t.set_error(token::e_err_number, initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } else if ( ('+' != c) && ('-' != c) && !details::is_digit(c) ) { t.set_error(token::e_err_number, initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } e_found = true; ++s_itr_; continue; } else if (e_found && details::is_sign(*s_itr_) && !post_e_digit_found) { if (post_e_sign_found) { t.set_error(token::e_err_number, initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } post_e_sign_found = true; ++s_itr_; continue; } else if (e_found && details::is_digit(*s_itr_)) { post_e_digit_found = true; ++s_itr_; continue; } else if (('.' != (*s_itr_)) && !details::is_digit(*s_itr_)) break; else ++s_itr_; } t.set_numeric(initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } inline void scan_special_function() { details::char_cptr initial_itr = s_itr_; token_t t; // $fdd(x,x,x) = at least 11 chars if (std::distance(s_itr_,s_end_) < 11) { t.set_error(token::e_err_sfunc, initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } if ( !(('$' == *s_itr_) && (details::imatch ('f',*(s_itr_ + 1))) && (details::is_digit(*(s_itr_ + 2))) && (details::is_digit(*(s_itr_ + 3)))) ) { t.set_error(token::e_err_sfunc, initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } s_itr_ += 4; // $fdd = 4chars t.set_symbol(initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } #ifndef exprtk_disable_string_capabilities inline void scan_string() { details::char_cptr initial_itr = s_itr_ + 1; token_t t; if (std::distance(s_itr_,s_end_) < 2) { t.set_error(token::e_err_string, s_itr_, s_end_, base_itr_); token_list_.push_back(t); return; } ++s_itr_; bool 
escaped_found = false; bool escaped = false; while (!is_end(s_itr_)) { if (!escaped && ('\\' == *s_itr_)) { escaped_found = true; escaped = true; ++s_itr_; continue; } else if (!escaped) { if ('\'' == *s_itr_) break; } else if (escaped) { if (!is_end(s_itr_) && ('0' == *(s_itr_))) { /* Note: The following 'awkward' conditional is due to various broken msvc compilers. */ #if defined(_MSC_VER) && (_MSC_VER == 1600) const bool within_range = !is_end(s_itr_ + 2) && !is_end(s_itr_ + 3) ; #else const bool within_range = !is_end(s_itr_ + 1) && !is_end(s_itr_ + 2) && !is_end(s_itr_ + 3) ; #endif const bool x_seperator = ('x' == *(s_itr_ + 1)) || ('X' == *(s_itr_ + 1)) ; const bool both_digits = details::is_hex_digit(*(s_itr_ + 2)) && details::is_hex_digit(*(s_itr_ + 3)) ; if (!within_range || !x_seperator || !both_digits) { t.set_error(token::e_err_string, initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } else s_itr_ += 3; } escaped = false; } ++s_itr_; } if (is_end(s_itr_)) { t.set_error(token::e_err_string, initial_itr, s_itr_, base_itr_); token_list_.push_back(t); return; } if (!escaped_found) t.set_string(initial_itr, s_itr_, base_itr_); else { std::string parsed_string(initial_itr,s_itr_); details::cleanup_escapes(parsed_string); t.set_string( parsed_string, static_cast<std::size_t>(std::distance(base_itr_,initial_itr))); } token_list_.push_back(t); ++s_itr_; return; } #endif private: token_list_t token_list_; token_list_itr_t token_itr_; token_list_itr_t store_token_itr_; token_t eof_token_; details::char_cptr base_itr_; details::char_cptr s_itr_; details::char_cptr s_end_; friend class token_scanner; friend class token_modifier; friend class token_inserter; friend class token_joiner; }; class helper_interface { public: virtual void init() { } virtual void reset() { } virtual bool result() { return true; } virtual std::size_t process(generator&) { return 0; } virtual ~helper_interface() { } }; class token_scanner : public helper_interface { public: 
virtual ~token_scanner() {} explicit token_scanner(const std::size_t& stride) : stride_(stride) { if (stride > 4) { throw std::invalid_argument("token_scanner() - Invalid stride value"); } } inline std::size_t process(generator& g) { if (g.token_list_.size() >= stride_) { for (std::size_t i = 0; i < (g.token_list_.size() - stride_ + 1); ++i) { token t; switch (stride_) { case 1 : { const token& t0 = g.token_list_[i]; if (!operator()(t0)) { return i; } } break; case 2 : { const token& t0 = g.token_list_[i ]; const token& t1 = g.token_list_[i + 1]; if (!operator()(t0, t1)) { return i; } } break; case 3 : { const token& t0 = g.token_list_[i ]; const token& t1 = g.token_list_[i + 1]; const token& t2 = g.token_list_[i + 2]; if (!operator()(t0, t1, t2)) { return i; } } break; case 4 : { const token& t0 = g.token_list_[i ]; const token& t1 = g.token_list_[i + 1]; const token& t2 = g.token_list_[i + 2]; const token& t3 = g.token_list_[i + 3]; if (!operator()(t0, t1, t2, t3)) { return i; } } break; } } } return (g.token_list_.size() - stride_ + 1); } virtual bool operator() (const token&) { return false; } virtual bool operator() (const token&, const token&) { return false; } virtual bool operator() (const token&, const token&, const token&) { return false; } virtual bool operator() (const token&, const token&, const token&, const token&) { return false; } private: const std::size_t stride_; }; class token_modifier : public helper_interface { public: inline std::size_t process(generator& g) { std::size_t changes = 0; for (std::size_t i = 0; i < g.token_list_.size(); ++i) { if (modify(g.token_list_[i])) changes++; } return changes; } virtual bool modify(token& t) = 0; }; class token_inserter : public helper_interface { public: explicit token_inserter(const std::size_t& stride) : stride_(stride) { if (stride > 5) { throw std::invalid_argument("token_inserter() - Invalid stride value"); } } inline std::size_t process(generator& g) { if (g.token_list_.empty()) return 0; else if 
(g.token_list_.size() < stride_) return 0; std::size_t changes = 0; for (std::size_t i = 0; i < (g.token_list_.size() - stride_ + 1); ++i) { int insert_index = -1; token t; switch (stride_) { case 1 : insert_index = insert(g.token_list_[i],t); break; case 2 : insert_index = insert(g.token_list_[i], g.token_list_[i + 1], t); break; case 3 : insert_index = insert(g.token_list_[i], g.token_list_[i + 1], g.token_list_[i + 2], t); break; case 4 : insert_index = insert(g.token_list_[i], g.token_list_[i + 1], g.token_list_[i + 2], g.token_list_[i + 3], t); break; case 5 : insert_index = insert(g.token_list_[i], g.token_list_[i + 1], g.token_list_[i + 2], g.token_list_[i + 3], g.token_list_[i + 4], t); break; } typedef std::iterator_traits<generator::token_list_t::iterator>::difference_type diff_t; if ((insert_index >= 0) && (insert_index <= (static_cast<int>(stride_) + 1))) { g.token_list_.insert( g.token_list_.begin() + static_cast<diff_t>(i + static_cast<std::size_t>(insert_index)), t); changes++; } } return changes; } #define token_inserter_empty_body \ { \ return -1; \ } \ inline virtual int insert(const token&, token&) token_inserter_empty_body inline virtual int insert(const token&, const token&, token&) token_inserter_empty_body inline virtual int insert(const token&, const token&, const token&, token&) token_inserter_empty_body inline virtual int insert(const token&, const token&, const token&, const token&, token&) token_inserter_empty_body inline virtual int insert(const token&, const token&, const token&, const token&, const token&, token&) token_inserter_empty_body #undef token_inserter_empty_body private: const std::size_t stride_; }; class token_joiner : public helper_interface { public: token_joiner(const std::size_t& stride) : stride_(stride) {} inline std::size_t process(generator& g) { if (g.token_list_.empty()) return 0; switch (stride_) { case 2 : return process_stride_2(g); case 3 : return process_stride_3(g); default : return 0; } } virtual bool 
join(const token&, const token&, token&) { return false; } virtual bool join(const token&, const token&, const token&, token&) { return false; } private: inline std::size_t process_stride_2(generator& g) { typedef std::iterator_traits<generator::token_list_t::iterator>::difference_type diff_t; if (g.token_list_.size() < 2) return 0; std::size_t changes = 0; for (std::size_t i = 0; i < (g.token_list_.size() - 1); ++i) { token t; while (join(g[i], g[i + 1], t)) { g.token_list_[i] = t; g.token_list_.erase(g.token_list_.begin() + static_cast<diff_t>(i + 1)); ++changes; } } return changes; } inline std::size_t process_stride_3(generator& g) { typedef std::iterator_traits<generator::token_list_t::iterator>::difference_type diff_t; if (g.token_list_.size() < 3) return 0; std::size_t changes = 0; for (std::size_t i = 0; i < (g.token_list_.size() - 2); ++i) { token t; while (join(g[i], g[i + 1], g[i + 2], t)) { g.token_list_[i] = t; g.token_list_.erase(g.token_list_.begin() + static_cast<diff_t>(i + 1), g.token_list_.begin() + static_cast<diff_t>(i + 3)); ++changes; } } return changes; } const std::size_t stride_; }; namespace helper { inline void dump(lexer::generator& generator) { for (std::size_t i = 0; i < generator.size(); ++i) { lexer::token t = generator[i]; printf("Token[%02d] @ %03d %6s --> '%s'\n", static_cast<int>(i), static_cast<int>(t.position), t.to_str(t.type).c_str(), t.value.c_str()); } } class commutative_inserter : public lexer::token_inserter { public: using lexer::token_inserter::insert; commutative_inserter() : lexer::token_inserter(2) {} inline void ignore_symbol(const std::string& symbol) { ignore_set_.insert(symbol); } inline int insert(const lexer::token& t0, const lexer::token& t1, lexer::token& new_token) { bool match = false; new_token.type = lexer::token::e_mul; new_token.value = "*"; new_token.position = t1.position; if (t0.type == lexer::token::e_symbol) { if (ignore_set_.end() != ignore_set_.find(t0.value)) { return -1; } else if 
(!t0.value.empty() && ('$' == t0.value[0])) { return -1; } } if (t1.type == lexer::token::e_symbol) { if (ignore_set_.end() != ignore_set_.find(t1.value)) { return -1; } } if ((t0.type == lexer::token::e_number ) && (t1.type == lexer::token::e_symbol )) match = true; else if ((t0.type == lexer::token::e_number ) && (t1.type == lexer::token::e_lbracket )) match = true; else if ((t0.type == lexer::token::e_number ) && (t1.type == lexer::token::e_lcrlbracket)) match = true; else if ((t0.type == lexer::token::e_number ) && (t1.type == lexer::token::e_lsqrbracket)) match = true; else if ((t0.type == lexer::token::e_symbol ) && (t1.type == lexer::token::e_number )) match = true; else if ((t0.type == lexer::token::e_rbracket ) && (t1.type == lexer::token::e_number )) match = true; else if ((t0.type == lexer::token::e_rcrlbracket) && (t1.type == lexer::token::e_number )) match = true; else if ((t0.type == lexer::token::e_rsqrbracket) && (t1.type == lexer::token::e_number )) match = true; else if ((t0.type == lexer::token::e_rbracket ) && (t1.type == lexer::token::e_symbol )) match = true; else if ((t0.type == lexer::token::e_rcrlbracket) && (t1.type == lexer::token::e_symbol )) match = true; else if ((t0.type == lexer::token::e_rsqrbracket) && (t1.type == lexer::token::e_symbol )) match = true; return (match) ? 
1 : -1; } private: std::set<std::string,details::ilesscompare> ignore_set_; }; class operator_joiner : public token_joiner { public: operator_joiner(const std::size_t& stride) : token_joiner(stride) {} inline bool join(const lexer::token& t0, const lexer::token& t1, lexer::token& t) { // ': =' --> ':=' if ((t0.type == lexer::token::e_colon) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_assign; t.value = ":="; t.position = t0.position; return true; } // '+ =' --> '+=' else if ((t0.type == lexer::token::e_add) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_addass; t.value = "+="; t.position = t0.position; return true; } // '- =' --> '-=' else if ((t0.type == lexer::token::e_sub) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_subass; t.value = "-="; t.position = t0.position; return true; } // '* =' --> '*=' else if ((t0.type == lexer::token::e_mul) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_mulass; t.value = "*="; t.position = t0.position; return true; } // '/ =' --> '/=' else if ((t0.type == lexer::token::e_div) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_divass; t.value = "/="; t.position = t0.position; return true; } // '% =' --> '%=' else if ((t0.type == lexer::token::e_mod) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_modass; t.value = "%="; t.position = t0.position; return true; } // '> =' --> '>=' else if ((t0.type == lexer::token::e_gt) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_gte; t.value = ">="; t.position = t0.position; return true; } // '< =' --> '<=' else if ((t0.type == lexer::token::e_lt) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_lte; t.value = "<="; t.position = t0.position; return true; } // '= =' --> '==' else if ((t0.type == lexer::token::e_eq) && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_eq; t.value = "=="; t.position = t0.position; return true; } // '! 
=' --> '!=' else if ((static_cast<char>(t0.type) == '!') && (t1.type == lexer::token::e_eq)) { t.type = lexer::token::e_ne; t.value = "!="; t.position = t0.position; return true; } // '< >' --> '<>' else if ((t0.type == lexer::token::e_lt) && (t1.type == lexer::token::e_gt)) { t.type = lexer::token::e_ne; t.value = "<>"; t.position = t0.position; return true; } // '<= >' --> '<=>' else if ((t0.type == lexer::token::e_lte) && (t1.type == lexer::token::e_gt)) { t.type = lexer::token::e_swap; t.value = "<=>"; t.position = t0.position; return true; } // '+ -' --> '-' else if ((t0.type == lexer::token::e_add) && (t1.type == lexer::token::e_sub)) { t.type = lexer::token::e_sub; t.value = "-"; t.position = t0.position; return true; } // '- +' --> '-' else if ((t0.type == lexer::token::e_sub) && (t1.type == lexer::token::e_add)) { t.type = lexer::token::e_sub; t.value = "-"; t.position = t0.position; return true; } // '- -' --> '+' else if ((t0.type == lexer::token::e_sub) && (t1.type == lexer::token::e_sub)) { /* Note: May need to reconsider this when wanting to implement pre/postfix decrement operator */ t.type = lexer::token::e_add; t.value = "+"; t.position = t0.position; return true; } else return false; } inline bool join(const lexer::token& t0, const lexer::token& t1, const lexer::token& t2, lexer::token& t) { // '[ * ]' --> '[*]' if ( (t0.type == lexer::token::e_lsqrbracket) && (t1.type == lexer::token::e_mul ) && (t2.type == lexer::token::e_rsqrbracket) ) { t.type = lexer::token::e_symbol; t.value = "[*]"; t.position = t0.position; return true; } else return false; } }; class bracket_checker : public lexer::token_scanner { public: using lexer::token_scanner::operator(); bracket_checker() : token_scanner(1), state_(true) {} bool result() { if (!stack_.empty()) { lexer::token t; t.value = stack_.top().first; t.position = stack_.top().second; error_token_ = t; state_ = false; return false; } else return state_; } lexer::token error_token() { return error_token_; } 
void reset()
         {
            // Why? because msvc doesn't support swap properly.
            stack_ = std::stack<std::pair<char,std::size_t> >();
            state_ = true;
            error_token_.clear();
         }

         // Scanner callback: tracks bracket nesting one token at a time.
         // Returns false (and records the offending token) on the first
         // unmatched or mismatched closing bracket.
         bool operator() (const lexer::token& t)
         {
            if (
                 !t.value.empty()                       &&
                 (lexer::token::e_string != t.type)     &&
                 (lexer::token::e_symbol != t.type)     &&
                 exprtk::details::is_bracket(t.value[0])
               )
            {
               details::char_t c = t.value[0];

               // For every opener push the matching closer; a closer must
               // then match the character on top of the stack.
               if (t.type == lexer::token::e_lbracket)
                  stack_.push(std::make_pair(')',t.position));
               else if (t.type == lexer::token::e_lcrlbracket)
                  stack_.push(std::make_pair('}',t.position));
               else if (t.type == lexer::token::e_lsqrbracket)
                  stack_.push(std::make_pair(']',t.position));
               else if (exprtk::details::is_right_bracket(c))
               {
                  if (stack_.empty())
                  {
                     // Closing bracket with no matching opener.
                     state_ = false;
                     error_token_ = t;
                     return false;
                  }
                  else if (c != stack_.top().first)
                  {
                     // Wrong kind of closing bracket, e.g. "(]".
                     state_ = false;
                     error_token_ = t;
                     return false;
                  }
                  else
                     stack_.pop();
               }
            }

            return true;
         }

      private:

         bool state_;
         // Expected closing characters paired with the position of the opener.
         std::stack<std::pair<char,std::size_t> > stack_;
         lexer::token error_token_;
      };

      // Scans the token stream and records the stream index of every
      // numeric literal that fails conversion to a real value.
      class numeric_checker : public lexer::token_scanner
      {
      public:

         using lexer::token_scanner::operator();

         numeric_checker()
         : token_scanner (1),
           current_index_(0)
         {}

         // True when every numeric literal converted successfully.
         bool result()
         {
            return error_list_.empty();
         }

         void reset()
         {
            error_list_.clear();
            current_index_ = 0;
         }

         bool operator() (const lexer::token& t)
         {
            if (token::e_number == t.type)
            {
               // NOTE(review): validation goes through double regardless of
               // the expression value type T — presumably adequate for the
               // supported numeric types; confirm if extending to others.
               double v;

               if (!exprtk::details::string_to_real(t.value,v))
               {
                  error_list_.push_back(current_index_);
               }
            }

            ++current_index_;

            return true;
         }

         std::size_t error_count() const
         {
            return error_list_.size();
         }

         // Token index of the i'th invalid literal, or SIZE_MAX when
         // 'i' is out of range.
         std::size_t error_index(const std::size_t& i)
         {
            if (i < error_list_.size())
               return error_list_[i];
            else
               return std::numeric_limits<std::size_t>::max();
         }

         void clear_errors()
         {
            error_list_.clear();
         }

      private:

         std::size_t current_index_;
         std::vector<std::size_t> error_list_;
      };

      // Token modifier that substitutes registered symbols
      // (matched case-insensitively) with a replacement text/type pair.
      class symbol_replacer : public lexer::token_modifier
      {
      private:

         typedef std::map<std::string,std::pair<std::string,token::token_type>,details::ilesscompare> replace_map_t;

      public:

         bool
remove(const std::string& target_symbol) { const replace_map_t::iterator itr = replace_map_.find(target_symbol); if (replace_map_.end() == itr) return false; replace_map_.erase(itr); return true; } bool add_replace(const std::string& target_symbol, const std::string& replace_symbol, const lexer::token::token_type token_type = lexer::token::e_symbol) { const replace_map_t::iterator itr = replace_map_.find(target_symbol); if (replace_map_.end() != itr) { return false; } replace_map_[target_symbol] = std::make_pair(replace_symbol,token_type); return true; } void clear() { replace_map_.clear(); } private: bool modify(lexer::token& t) { if (lexer::token::e_symbol == t.type) { if (replace_map_.empty()) return false; const replace_map_t::iterator itr = replace_map_.find(t.value); if (replace_map_.end() != itr) { t.value = itr->second.first; t.type = itr->second.second; return true; } } return false; } replace_map_t replace_map_; }; class sequence_validator : public lexer::token_scanner { private: typedef std::pair<lexer::token::token_type,lexer::token::token_type> token_pair_t; typedef std::set<token_pair_t> set_t; public: using lexer::token_scanner::operator(); sequence_validator() : lexer::token_scanner(2) { add_invalid(lexer::token::e_number ,lexer::token::e_number ); add_invalid(lexer::token::e_string ,lexer::token::e_string ); add_invalid(lexer::token::e_number ,lexer::token::e_string ); add_invalid(lexer::token::e_string ,lexer::token::e_number ); add_invalid_set1(lexer::token::e_assign ); add_invalid_set1(lexer::token::e_shr ); add_invalid_set1(lexer::token::e_shl ); add_invalid_set1(lexer::token::e_lte ); add_invalid_set1(lexer::token::e_ne ); add_invalid_set1(lexer::token::e_gte ); add_invalid_set1(lexer::token::e_lt ); add_invalid_set1(lexer::token::e_gt ); add_invalid_set1(lexer::token::e_eq ); add_invalid_set1(lexer::token::e_comma ); add_invalid_set1(lexer::token::e_add ); add_invalid_set1(lexer::token::e_sub ); add_invalid_set1(lexer::token::e_div ); 
add_invalid_set1(lexer::token::e_mul ); add_invalid_set1(lexer::token::e_mod ); add_invalid_set1(lexer::token::e_pow ); add_invalid_set1(lexer::token::e_colon ); add_invalid_set1(lexer::token::e_ternary); } bool result() { return error_list_.empty(); } bool operator() (const lexer::token& t0, const lexer::token& t1) { const set_t::value_type p = std::make_pair(t0.type,t1.type); if (invalid_bracket_check(t0.type,t1.type)) { error_list_.push_back(std::make_pair(t0,t1)); } else if (invalid_comb_.find(p) != invalid_comb_.end()) { error_list_.push_back(std::make_pair(t0,t1)); } return true; } std::size_t error_count() const { return error_list_.size(); } std::pair<lexer::token,lexer::token> error(const std::size_t index) { if (index < error_list_.size()) { return error_list_[index]; } else { static const lexer::token error_token; return std::make_pair(error_token,error_token); } } void clear_errors() { error_list_.clear(); } private: void add_invalid(lexer::token::token_type base, lexer::token::token_type t) { invalid_comb_.insert(std::make_pair(base,t)); } void add_invalid_set1(lexer::token::token_type t) { add_invalid(t,lexer::token::e_assign); add_invalid(t,lexer::token::e_shr ); add_invalid(t,lexer::token::e_shl ); add_invalid(t,lexer::token::e_lte ); add_invalid(t,lexer::token::e_ne ); add_invalid(t,lexer::token::e_gte ); add_invalid(t,lexer::token::e_lt ); add_invalid(t,lexer::token::e_gt ); add_invalid(t,lexer::token::e_eq ); add_invalid(t,lexer::token::e_comma ); add_invalid(t,lexer::token::e_div ); add_invalid(t,lexer::token::e_mul ); add_invalid(t,lexer::token::e_mod ); add_invalid(t,lexer::token::e_pow ); add_invalid(t,lexer::token::e_colon ); } bool invalid_bracket_check(lexer::token::token_type base, lexer::token::token_type t) { if (details::is_right_bracket(static_cast<char>(base))) { switch (t) { case lexer::token::e_assign : return (']' != base); case lexer::token::e_string : return true; default : return false; } } else if 
(details::is_left_bracket(static_cast<char>(base))) { if (details::is_right_bracket(static_cast<char>(t))) return false; else if (details::is_left_bracket(static_cast<char>(t))) return false; else { switch (t) { case lexer::token::e_number : return false; case lexer::token::e_symbol : return false; case lexer::token::e_string : return false; case lexer::token::e_add : return false; case lexer::token::e_sub : return false; case lexer::token::e_colon : return false; case lexer::token::e_ternary : return false; default : return true ; } } } else if (details::is_right_bracket(static_cast<char>(t))) { switch (base) { case lexer::token::e_number : return false; case lexer::token::e_symbol : return false; case lexer::token::e_string : return false; case lexer::token::e_eof : return false; case lexer::token::e_colon : return false; case lexer::token::e_ternary : return false; default : return true ; } } else if (details::is_left_bracket(static_cast<char>(t))) { switch (base) { case lexer::token::e_rbracket : return true; case lexer::token::e_rsqrbracket : return true; case lexer::token::e_rcrlbracket : return true; default : return false; } } return false; } set_t invalid_comb_; std::vector<std::pair<lexer::token,lexer::token> > error_list_; }; struct helper_assembly { inline bool register_scanner(lexer::token_scanner* scanner) { if (token_scanner_list.end() != std::find(token_scanner_list.begin(), token_scanner_list.end (), scanner)) { return false; } token_scanner_list.push_back(scanner); return true; } inline bool register_modifier(lexer::token_modifier* modifier) { if (token_modifier_list.end() != std::find(token_modifier_list.begin(), token_modifier_list.end (), modifier)) { return false; } token_modifier_list.push_back(modifier); return true; } inline bool register_joiner(lexer::token_joiner* joiner) { if (token_joiner_list.end() != std::find(token_joiner_list.begin(), token_joiner_list.end (), joiner)) { return false; } token_joiner_list.push_back(joiner); return 
true; } inline bool register_inserter(lexer::token_inserter* inserter) { if (token_inserter_list.end() != std::find(token_inserter_list.begin(), token_inserter_list.end (), inserter)) { return false; } token_inserter_list.push_back(inserter); return true; } inline bool run_modifiers(lexer::generator& g) { error_token_modifier = reinterpret_cast<lexer::token_modifier*>(0); for (std::size_t i = 0; i < token_modifier_list.size(); ++i) { lexer::token_modifier& modifier = (*token_modifier_list[i]); modifier.reset(); modifier.process(g); if (!modifier.result()) { error_token_modifier = token_modifier_list[i]; return false; } } return true; } inline bool run_joiners(lexer::generator& g) { error_token_joiner = reinterpret_cast<lexer::token_joiner*>(0); for (std::size_t i = 0; i < token_joiner_list.size(); ++i) { lexer::token_joiner& joiner = (*token_joiner_list[i]); joiner.reset(); joiner.process(g); if (!joiner.result()) { error_token_joiner = token_joiner_list[i]; return false; } } return true; } inline bool run_inserters(lexer::generator& g) { error_token_inserter = reinterpret_cast<lexer::token_inserter*>(0); for (std::size_t i = 0; i < token_inserter_list.size(); ++i) { lexer::token_inserter& inserter = (*token_inserter_list[i]); inserter.reset(); inserter.process(g); if (!inserter.result()) { error_token_inserter = token_inserter_list[i]; return false; } } return true; } inline bool run_scanners(lexer::generator& g) { error_token_scanner = reinterpret_cast<lexer::token_scanner*>(0); for (std::size_t i = 0; i < token_scanner_list.size(); ++i) { lexer::token_scanner& scanner = (*token_scanner_list[i]); scanner.reset(); scanner.process(g); if (!scanner.result()) { error_token_scanner = token_scanner_list[i]; return false; } } return true; } std::vector<lexer::token_scanner*> token_scanner_list; std::vector<lexer::token_modifier*> token_modifier_list; std::vector<lexer::token_joiner*> token_joiner_list; std::vector<lexer::token_inserter*> token_inserter_list; 
lexer::token_scanner*  error_token_scanner;   // first scanner that reported failure (or null)
         lexer::token_modifier* error_token_modifier;  // first modifier that reported failure (or null)
         lexer::token_joiner*   error_token_joiner;    // first joiner that reported failure (or null)
         lexer::token_inserter* error_token_inserter;  // first inserter that reported failure (or null)
      };
   }

   // Thin token-cursor wrapper around the lexer generator, used by the
   // parser: tokenises an input string and exposes current/peek/advance
   // operations plus a single-level store/restore for speculative parsing.
   class parser_helper
   {
   public:

      typedef token     token_t;
      typedef generator generator_t;

      // Tokenise 'str' and position the cursor on the first token.
      // Returns false when lexing fails.
      inline bool init(const std::string& str)
      {
         if (!lexer_.process(str))
         {
            return false;
         }

         lexer_.begin();

         next_token();

         return true;
      }

      inline generator_t& lexer()
      {
         return lexer_;
      }

      inline const generator_t& lexer() const
      {
         return lexer_;
      }

      // Save the cursor so a speculative parse can later be rolled back
      // via restore_token(). Only one level of save is kept.
      inline void store_token()
      {
         lexer_.store();
         store_current_token_ = current_token_;
      }

      inline void restore_token()
      {
         lexer_.restore();
         current_token_ = store_current_token_;
      }

      inline void next_token()
      {
         current_token_ = lexer_.next_token();
      }

      inline const token_t& current_token() const
      {
         return current_token_;
      }

      enum token_advance_mode
      {
         e_hold    = 0,   // leave the cursor where it is
         e_advance = 1    // consume the current token
      };

      inline void advance_token(const token_advance_mode mode)
      {
         if (e_advance == mode)
         {
            next_token();
         }
      }

      // True when the current token has type 'ttype'; optionally advances.
      inline bool token_is(const token_t::token_type& ttype, const token_advance_mode mode = e_advance)
      {
         if (current_token().type != ttype)
         {
            return false;
         }

         advance_token(mode);

         return true;
      }

      // As above, but the token text must also match 'value'
      // (case-insensitive comparison).
      inline bool token_is(const token_t::token_type& ttype,
                           const std::string& value,
                           const token_advance_mode mode = e_advance)
      {
         if (
              (current_token().type != ttype) ||
              !exprtk::details::imatch(value,current_token().value)
            )
         {
            return false;
         }

         advance_token(mode);

         return true;
      }

      inline bool peek_token_is(const token_t::token_type& ttype)
      {
         return (lexer_.peek_next_token().type == ttype);
      }

      inline bool peek_token_is(const std::string& s)
      {
         return (exprtk::details::imatch(lexer_.peek_next_token().value,s));
      }

   private:

      generator_t lexer_;
      token_t     current_token_;
      token_t     store_current_token_;
   };
   }

   // Non-owning view over a contiguous run of 'size' elements of T.
   // rebase() (defined further on) lets dependent pointers track a new
   // underlying buffer.
   template <typename T>
   class vector_view
   {
   public:

      typedef T* data_ptr_t;

      vector_view(data_ptr_t data, const std::size_t& size)
      : size_(size),
        data_(data),
        data_ref_(0)
      {}

      vector_view(const vector_view<T>& vv)
      : size_(vv.size_),
data_(vv.data_), data_ref_(0) {} inline void rebase(data_ptr_t data) { data_ = data; if (!data_ref_.empty()) { for (std::size_t i = 0; i < data_ref_.size(); ++i) { (*data_ref_[i]) = data; } } } inline data_ptr_t data() const { return data_; } inline std::size_t size() const { return size_; } inline const T& operator[](const std::size_t index) const { return data_[index]; } inline T& operator[](const std::size_t index) { return data_[index]; } void set_ref(data_ptr_t* data_ref) { data_ref_.push_back(data_ref); } private: const std::size_t size_; data_ptr_t data_; std::vector<data_ptr_t*> data_ref_; }; template <typename T> inline vector_view<T> make_vector_view(T* data, const std::size_t size, const std::size_t offset = 0) { return vector_view<T>(data + offset,size); } template <typename T> inline vector_view<T> make_vector_view(std::vector<T>& v, const std::size_t size, const std::size_t offset = 0) { return vector_view<T>(v.data() + offset,size); } template <typename T> class results_context; template <typename T> struct type_store { enum store_type { e_unknown, e_scalar , e_vector , e_string }; type_store() : size(0), data(0), type(e_unknown) {} std::size_t size; void* data; store_type type; class parameter_list { public: parameter_list(std::vector<type_store>& pl) : parameter_list_(pl) {} inline bool empty() const { return parameter_list_.empty(); } inline std::size_t size() const { return parameter_list_.size(); } inline type_store& operator[](const std::size_t& index) { return parameter_list_[index]; } inline const type_store& operator[](const std::size_t& index) const { return parameter_list_[index]; } inline type_store& front() { return parameter_list_[0]; } inline const type_store& front() const { return parameter_list_[0]; } inline type_store& back() { return parameter_list_.back(); } inline const type_store& back() const { return parameter_list_.back(); } private: std::vector<type_store>& parameter_list_; friend class results_context<T>; }; template 
<typename ViewType> struct type_view { typedef type_store<T> type_store_t; typedef ViewType value_t; type_view(type_store_t& ts) : ts_(ts), data_(reinterpret_cast<value_t*>(ts_.data)) {} type_view(const type_store_t& ts) : ts_(const_cast<type_store_t&>(ts)), data_(reinterpret_cast<value_t*>(ts_.data)) {} inline std::size_t size() const { return ts_.size; } inline value_t& operator[](const std::size_t& i) { return data_[i]; } inline const value_t& operator[](const std::size_t& i) const { return data_[i]; } inline const value_t* begin() const { return data_; } inline value_t* begin() { return data_; } inline const value_t* end() const { return static_cast<value_t*>(data_ + ts_.size); } inline value_t* end() { return static_cast<value_t*>(data_ + ts_.size); } type_store_t& ts_; value_t* data_; }; typedef type_view<T> vector_view; typedef type_view<char> string_view; struct scalar_view { typedef type_store<T> type_store_t; typedef T value_t; scalar_view(type_store_t& ts) : v_(*reinterpret_cast<value_t*>(ts.data)) {} scalar_view(const type_store_t& ts) : v_(*reinterpret_cast<value_t*>(const_cast<type_store_t&>(ts).data)) {} inline value_t& operator() () { return v_; } inline const value_t& operator() () const { return v_; } template <typename IntType> inline bool to_int(IntType& i) const { if (!exprtk::details::numeric::is_integer(v_)) return false; i = static_cast<IntType>(v_); return true; } template <typename UIntType> inline bool to_uint(UIntType& u) const { if (v_ < T(0)) return false; else if (!exprtk::details::numeric::is_integer(v_)) return false; u = static_cast<UIntType>(v_); return true; } T& v_; }; }; template <typename StringView> inline std::string to_str(const StringView& view) { return std::string(view.begin(),view.size()); } #ifndef exprtk_disable_return_statement namespace details { template <typename T> class return_node; template <typename T> class return_envelope_node; } #endif template <typename T> class results_context { public: typedef 
type_store<T> type_store_t; results_context() : results_available_(false) {} inline std::size_t count() const { if (results_available_) return parameter_list_.size(); else return 0; } inline type_store_t& operator[](const std::size_t& index) { return parameter_list_[index]; } inline const type_store_t& operator[](const std::size_t& index) const { return parameter_list_[index]; } private: inline void clear() { results_available_ = false; } typedef std::vector<type_store_t> ts_list_t; typedef typename type_store_t::parameter_list parameter_list_t; inline void assign(const parameter_list_t& pl) { parameter_list_ = pl.parameter_list_; results_available_ = true; } bool results_available_; ts_list_t parameter_list_; #ifndef exprtk_disable_return_statement friend class details::return_node<T>; friend class details::return_envelope_node<T>; #endif }; namespace details { enum operator_type { e_default , e_null , e_add , e_sub , e_mul , e_div , e_mod , e_pow , e_atan2 , e_min , e_max , e_avg , e_sum , e_prod , e_lt , e_lte , e_eq , e_equal , e_ne , e_nequal , e_gte , e_gt , e_and , e_nand , e_or , e_nor , e_xor , e_xnor , e_mand , e_mor , e_scand , e_scor , e_shr , e_shl , e_abs , e_acos , e_acosh , e_asin , e_asinh , e_atan , e_atanh , e_ceil , e_cos , e_cosh , e_exp , e_expm1 , e_floor , e_log , e_log10 , e_log2 , e_log1p , e_logn , e_neg , e_pos , e_round , e_roundn , e_root , e_sqrt , e_sin , e_sinc , e_sinh , e_sec , e_csc , e_tan , e_tanh , e_cot , e_clamp , e_iclamp , e_inrange , e_sgn , e_r2d , e_d2r , e_d2g , e_g2d , e_hypot , e_notl , e_erf , e_erfc , e_ncdf , e_frac , e_trunc , e_assign , e_addass , e_subass , e_mulass , e_divass , e_modass , e_in , e_like , e_ilike , e_multi , e_smulti , e_swap , // Do not add new functions/operators after this point. 
e_sf00 = 1000, e_sf01 = 1001, e_sf02 = 1002, e_sf03 = 1003, e_sf04 = 1004, e_sf05 = 1005, e_sf06 = 1006, e_sf07 = 1007, e_sf08 = 1008, e_sf09 = 1009, e_sf10 = 1010, e_sf11 = 1011, e_sf12 = 1012, e_sf13 = 1013, e_sf14 = 1014, e_sf15 = 1015, e_sf16 = 1016, e_sf17 = 1017, e_sf18 = 1018, e_sf19 = 1019, e_sf20 = 1020, e_sf21 = 1021, e_sf22 = 1022, e_sf23 = 1023, e_sf24 = 1024, e_sf25 = 1025, e_sf26 = 1026, e_sf27 = 1027, e_sf28 = 1028, e_sf29 = 1029, e_sf30 = 1030, e_sf31 = 1031, e_sf32 = 1032, e_sf33 = 1033, e_sf34 = 1034, e_sf35 = 1035, e_sf36 = 1036, e_sf37 = 1037, e_sf38 = 1038, e_sf39 = 1039, e_sf40 = 1040, e_sf41 = 1041, e_sf42 = 1042, e_sf43 = 1043, e_sf44 = 1044, e_sf45 = 1045, e_sf46 = 1046, e_sf47 = 1047, e_sf48 = 1048, e_sf49 = 1049, e_sf50 = 1050, e_sf51 = 1051, e_sf52 = 1052, e_sf53 = 1053, e_sf54 = 1054, e_sf55 = 1055, e_sf56 = 1056, e_sf57 = 1057, e_sf58 = 1058, e_sf59 = 1059, e_sf60 = 1060, e_sf61 = 1061, e_sf62 = 1062, e_sf63 = 1063, e_sf64 = 1064, e_sf65 = 1065, e_sf66 = 1066, e_sf67 = 1067, e_sf68 = 1068, e_sf69 = 1069, e_sf70 = 1070, e_sf71 = 1071, e_sf72 = 1072, e_sf73 = 1073, e_sf74 = 1074, e_sf75 = 1075, e_sf76 = 1076, e_sf77 = 1077, e_sf78 = 1078, e_sf79 = 1079, e_sf80 = 1080, e_sf81 = 1081, e_sf82 = 1082, e_sf83 = 1083, e_sf84 = 1084, e_sf85 = 1085, e_sf86 = 1086, e_sf87 = 1087, e_sf88 = 1088, e_sf89 = 1089, e_sf90 = 1090, e_sf91 = 1091, e_sf92 = 1092, e_sf93 = 1093, e_sf94 = 1094, e_sf95 = 1095, e_sf96 = 1096, e_sf97 = 1097, e_sf98 = 1098, e_sf99 = 1099, e_sffinal = 1100, e_sf4ext00 = 2000, e_sf4ext01 = 2001, e_sf4ext02 = 2002, e_sf4ext03 = 2003, e_sf4ext04 = 2004, e_sf4ext05 = 2005, e_sf4ext06 = 2006, e_sf4ext07 = 2007, e_sf4ext08 = 2008, e_sf4ext09 = 2009, e_sf4ext10 = 2010, e_sf4ext11 = 2011, e_sf4ext12 = 2012, e_sf4ext13 = 2013, e_sf4ext14 = 2014, e_sf4ext15 = 2015, e_sf4ext16 = 2016, e_sf4ext17 = 2017, e_sf4ext18 = 2018, e_sf4ext19 = 2019, e_sf4ext20 = 2020, e_sf4ext21 = 2021, e_sf4ext22 = 2022, e_sf4ext23 = 2023, e_sf4ext24 = 2024, 
e_sf4ext25 = 2025, e_sf4ext26 = 2026, e_sf4ext27 = 2027, e_sf4ext28 = 2028, e_sf4ext29 = 2029, e_sf4ext30 = 2030, e_sf4ext31 = 2031, e_sf4ext32 = 2032, e_sf4ext33 = 2033, e_sf4ext34 = 2034, e_sf4ext35 = 2035, e_sf4ext36 = 2036, e_sf4ext37 = 2037, e_sf4ext38 = 2038, e_sf4ext39 = 2039, e_sf4ext40 = 2040, e_sf4ext41 = 2041, e_sf4ext42 = 2042, e_sf4ext43 = 2043, e_sf4ext44 = 2044, e_sf4ext45 = 2045, e_sf4ext46 = 2046, e_sf4ext47 = 2047, e_sf4ext48 = 2048, e_sf4ext49 = 2049, e_sf4ext50 = 2050, e_sf4ext51 = 2051, e_sf4ext52 = 2052, e_sf4ext53 = 2053, e_sf4ext54 = 2054, e_sf4ext55 = 2055, e_sf4ext56 = 2056, e_sf4ext57 = 2057, e_sf4ext58 = 2058, e_sf4ext59 = 2059, e_sf4ext60 = 2060, e_sf4ext61 = 2061 }; inline std::string to_str(const operator_type opr) { switch (opr) { case e_add : return "+"; case e_sub : return "-"; case e_mul : return "*"; case e_div : return "/"; case e_mod : return "%"; case e_pow : return "^"; case e_assign : return ":="; case e_addass : return "+="; case e_subass : return "-="; case e_mulass : return "*="; case e_divass : return "/="; case e_modass : return "%="; case e_lt : return "<"; case e_lte : return "<="; case e_eq : return "=="; case e_equal : return "="; case e_ne : return "!="; case e_nequal : return "<>"; case e_gte : return ">="; case e_gt : return ">"; default : return"N/A"; } } struct base_operation_t { base_operation_t(const operator_type t, const unsigned int& np) : type(t), num_params(np) {} operator_type type; unsigned int num_params; }; namespace loop_unroll { #ifndef exprtk_disable_superscalar_unroll const unsigned int global_loop_batch_size = 16; #else const unsigned int global_loop_batch_size = 4; #endif struct details { details(const std::size_t& vsize, const unsigned int loop_batch_size = global_loop_batch_size) : batch_size(loop_batch_size ), remainder (vsize % batch_size), upper_bound(static_cast<int>(vsize - (remainder ? 
loop_batch_size : 0))) {} unsigned int batch_size; int remainder; int upper_bound; }; } #ifdef exprtk_enable_debugging inline void dump_ptr(const std::string& s, const void* ptr, const std::size_t size = 0) { if (size) exprtk_debug(("%s - addr: %p\n",s.c_str(),ptr)); else exprtk_debug(("%s - addr: %p size: %d\n", s.c_str(), ptr, static_cast<unsigned int>(size))); } #else inline void dump_ptr(const std::string&, const void*) {} inline void dump_ptr(const std::string&, const void*, const std::size_t) {} #endif template <typename T> class vec_data_store { public: typedef vec_data_store<T> type; typedef T* data_t; private: struct control_block { control_block() : ref_count(1), size (0), data (0), destruct (true) {} control_block(const std::size_t& dsize) : ref_count(1 ), size (dsize), data (0 ), destruct (true ) { create_data(); } control_block(const std::size_t& dsize, data_t dptr, bool dstrct = false) : ref_count(1 ), size (dsize ), data (dptr ), destruct (dstrct) {} ~control_block() { if (data && destruct && (0 == ref_count)) { dump_ptr("~control_block() data",data); delete[] data; data = reinterpret_cast<data_t>(0); } } static inline control_block* create(const std::size_t& dsize, data_t data_ptr = data_t(0), bool dstrct = false) { if (dsize) { if (0 == data_ptr) return (new control_block(dsize)); else return (new control_block(dsize, data_ptr, dstrct)); } else return (new control_block); } static inline void destroy(control_block*& cntrl_blck) { if (cntrl_blck) { if ( (0 != cntrl_blck->ref_count) && (0 == --cntrl_blck->ref_count) ) { delete cntrl_blck; } cntrl_blck = 0; } } std::size_t ref_count; std::size_t size; data_t data; bool destruct; private: control_block(const control_block&); control_block& operator=(const control_block&); inline void create_data() { destruct = true; data = new T[size]; std::fill_n(data,size,T(0)); dump_ptr("control_block::create_data() - data",data,size); } }; public: vec_data_store() : control_block_(control_block::create(0)) {} 
vec_data_store(const std::size_t& size) : control_block_(control_block::create(size,(data_t)(0),true)) {} vec_data_store(const std::size_t& size, data_t data, bool dstrct = false) : control_block_(control_block::create(size, data, dstrct)) {} vec_data_store(const type& vds) { control_block_ = vds.control_block_; control_block_->ref_count++; } ~vec_data_store() { control_block::destroy(control_block_); } type& operator=(const type& vds) { if (this != &vds) { std::size_t final_size = min_size(control_block_, vds.control_block_); vds.control_block_->size = final_size; control_block_->size = final_size; if (control_block_->destruct || (0 == control_block_->data)) { control_block::destroy(control_block_); control_block_ = vds.control_block_; control_block_->ref_count++; } } return (*this); } inline data_t data() { return control_block_->data; } inline data_t data() const { return control_block_->data; } inline std::size_t size() { return control_block_->size; } inline std::size_t size() const { return control_block_->size; } inline data_t& ref() { return control_block_->data; } inline void dump() const { #ifdef exprtk_enable_debugging exprtk_debug(("size: %d\taddress:%p\tdestruct:%c\n", size(), data(), (control_block_->destruct ? 'T' : 'F'))); for (std::size_t i = 0; i < size(); ++i) { if (5 == i) exprtk_debug(("\n")); exprtk_debug(("%15.10f ",data()[i])); } exprtk_debug(("\n")); #endif } static inline void match_sizes(type& vds0, type& vds1) { std::size_t size = min_size(vds0.control_block_,vds1.control_block_); vds0.control_block_->size = size; vds1.control_block_->size = size; } private: static inline std::size_t min_size(control_block* cb0, control_block* cb1) { const std::size_t size0 = cb0->size; const std::size_t size1 = cb1->size; if (size0 && size1) return std::min(size0,size1); else return (size0) ? 
size0 : size1;
      }

      control_block* control_block_;   // shared ownership record, never null
   };

   namespace numeric
   {
      namespace details
      {
         // Evaluate the unary operation 'operation' applied to 'arg'.
         // An unrecognised operation code is reported in debug builds and
         // yields quiet NaN.
         template <typename T>
         inline T process_impl(const operator_type operation, const T arg)
         {
            switch (operation)
            {
               case e_abs   : return numeric::abs  (arg);
               case e_acos  : return numeric::acos (arg);
               case e_acosh : return numeric::acosh(arg);
               case e_asin  : return numeric::asin (arg);
               case e_asinh : return numeric::asinh(arg);
               case e_atan  : return numeric::atan (arg);
               case e_atanh : return numeric::atanh(arg);
               case e_ceil  : return numeric::ceil (arg);
               case e_cos   : return numeric::cos  (arg);
               case e_cosh  : return numeric::cosh (arg);
               case e_exp   : return numeric::exp  (arg);
               case e_expm1 : return numeric::expm1(arg);
               case e_floor : return numeric::floor(arg);
               case e_log   : return numeric::log  (arg);
               case e_log10 : return numeric::log10(arg);
               case e_log2  : return numeric::log2 (arg);
               case e_log1p : return numeric::log1p(arg);
               case e_neg   : return numeric::neg  (arg);
               case e_pos   : return numeric::pos  (arg);
               case e_round : return numeric::round(arg);
               case e_sin   : return numeric::sin  (arg);
               case e_sinc  : return numeric::sinc (arg);
               case e_sinh  : return numeric::sinh (arg);
               case e_sqrt  : return numeric::sqrt (arg);
               case e_tan   : return numeric::tan  (arg);
               case e_tanh  : return numeric::tanh (arg);
               case e_cot   : return numeric::cot  (arg);
               case e_sec   : return numeric::sec  (arg);
               case e_csc   : return numeric::csc  (arg);
               case e_r2d   : return numeric::r2d  (arg);
               case e_d2r   : return numeric::d2r  (arg);
               case e_d2g   : return numeric::d2g  (arg);
               case e_g2d   : return numeric::g2d  (arg);
               case e_notl  : return numeric::notl (arg);
               case e_sgn   : return numeric::sgn  (arg);
               case e_erf   : return numeric::erf  (arg);
               case e_erfc  : return numeric::erfc (arg);
               case e_ncdf  : return numeric::ncdf (arg);
               case e_frac  : return numeric::frac (arg);
               case e_trunc : return numeric::trunc(arg);
               default      : exprtk_debug(("numeric::details::process_impl<T> - Invalid unary operation.\n"));
                              return std::numeric_limits<T>::quiet_NaN();
            }
         }
template <typename T> inline T process_impl(const operator_type operation, const T arg0, const T arg1) { switch (operation) { case e_add : return (arg0 + arg1); case e_sub : return (arg0 - arg1); case e_mul : return (arg0 * arg1); case e_div : return (arg0 / arg1); case e_mod : return modulus<T>(arg0,arg1); case e_pow : return pow<T>(arg0,arg1); case e_atan2 : return atan2<T>(arg0,arg1); case e_min : return std::min<T>(arg0,arg1); case e_max : return std::max<T>(arg0,arg1); case e_logn : return logn<T>(arg0,arg1); case e_lt : return (arg0 < arg1) ? T(1) : T(0); case e_lte : return (arg0 <= arg1) ? T(1) : T(0); case e_eq : return std::equal_to<T>()(arg0,arg1) ? T(1) : T(0); case e_ne : return std::not_equal_to<T>()(arg0,arg1) ? T(1) : T(0); case e_gte : return (arg0 >= arg1) ? T(1) : T(0); case e_gt : return (arg0 > arg1) ? T(1) : T(0); case e_and : return and_opr <T>(arg0,arg1); case e_nand : return nand_opr<T>(arg0,arg1); case e_or : return or_opr <T>(arg0,arg1); case e_nor : return nor_opr <T>(arg0,arg1); case e_xor : return xor_opr <T>(arg0,arg1); case e_xnor : return xnor_opr<T>(arg0,arg1); case e_root : return root <T>(arg0,arg1); case e_roundn : return roundn <T>(arg0,arg1); case e_equal : return equal (arg0,arg1); case e_nequal : return nequal (arg0,arg1); case e_hypot : return hypot <T>(arg0,arg1); case e_shr : return shr <T>(arg0,arg1); case e_shl : return shl <T>(arg0,arg1); default : exprtk_debug(("numeric::details::process_impl<T> - Invalid binary operation.\n")); return std::numeric_limits<T>::quiet_NaN(); } } template <typename T> inline T process_impl(const operator_type operation, const T arg0, const T arg1, int_type_tag) { switch (operation) { case e_add : return (arg0 + arg1); case e_sub : return (arg0 - arg1); case e_mul : return (arg0 * arg1); case e_div : return (arg0 / arg1); case e_mod : return arg0 % arg1; case e_pow : return pow<T>(arg0,arg1); case e_min : return std::min<T>(arg0,arg1); case e_max : return std::max<T>(arg0,arg1); case 
e_logn : return logn<T>(arg0,arg1);
                  // Comparison operators yield T(1)/T(0) as boolean results.
                  case e_lt     : return (arg0 < arg1) ? T(1) : T(0);
                  case e_lte    : return (arg0 <= arg1) ? T(1) : T(0);
                  case e_eq     : return (arg0 == arg1) ? T(1) : T(0);
                  case e_ne     : return (arg0 != arg1) ? T(1) : T(0);
                  case e_gte    : return (arg0 >= arg1) ? T(1) : T(0);
                  case e_gt     : return (arg0 > arg1) ? T(1) : T(0);
                  // Logical operators: any non-zero operand counts as true.
                  case e_and    : return ((arg0 != T(0)) && (arg1 != T(0))) ? T(1) : T(0);
                  case e_nand   : return ((arg0 != T(0)) && (arg1 != T(0))) ? T(0) : T(1);
                  case e_or     : return ((arg0 != T(0)) || (arg1 != T(0))) ? T(1) : T(0);
                  case e_nor    : return ((arg0 != T(0)) || (arg1 != T(0))) ? T(0) : T(1);
                  // Integer-only bitwise and shift operations (this is the IntType overload).
                  case e_xor    : return arg0 ^ arg1;
                  case e_xnor   : return !(arg0 ^ arg1);
                  case e_root   : return root<T>(arg0,arg1);
                  case e_equal  : return arg0 == arg1;
                  case e_nequal : return arg0 != arg1;
                  case e_hypot  : return hypot<T>(arg0,arg1);
                  case e_shr    : return arg0 >> arg1;
                  case e_shl    : return arg0 << arg1;
                  default       : exprtk_debug(("numeric::details::process_impl<IntType> - Invalid binary operation.\n"));
                                  return std::numeric_limits<T>::quiet_NaN();
               }
            }
         }

         // Dispatch a unary operation code to its numeric implementation.
         template <typename T>
         inline T process(const operator_type operation, const T arg)
         {
            return exprtk::details::numeric::details::process_impl(operation,arg);
         }

         // Dispatch a binary operation code to its numeric implementation.
         template <typename T>
         inline T process(const operator_type operation, const T arg0, const T arg1)
         {
            return exprtk::details::numeric::details::process_impl(operation,arg0,arg1);
         }
      }

      // Base class of every node in the compiled expression tree. Derived
      // nodes override value() (evaluate the subtree), type() (RTTI-free
      // node classification used by the optimiser) and branch() (child
      // access).
      template <typename T>
      class expression_node
      {
      public:

         // Classification tag for every concrete node kind; the parser and
         // optimiser switch on these instead of using dynamic_cast.
         enum node_type
         {
            e_none         , e_null         , e_constant     , e_unary        ,
            e_binary       , e_binary_ext   , e_trinary      , e_quaternary   ,
            e_vararg       , e_conditional  , e_while        , e_repeat       ,
            e_for          , e_switch       , e_mswitch      , e_return       ,
            e_retenv       , e_variable     , e_stringvar    , e_stringconst  ,
            e_stringvarrng , e_cstringvarrng, e_strgenrange  , e_strconcat    ,
            e_stringvarsize, e_strswap      , e_stringsize   , e_stringvararg ,
            e_function     , e_vafunction   , e_genfunction  , e_strfunction  ,
            e_strcondition , e_strccondition, e_add          , e_sub          ,
            e_mul          , e_div          , e_mod          , e_pow          ,
            e_lt           , e_lte          , e_gt           , e_gte          ,
            e_eq           , e_ne           , e_and          , e_nand         ,
            e_or           , e_nor          , e_xor          , e_xnor         ,
            e_in           , e_like         , e_ilike        , e_inranges     ,
            e_ipow         , e_ipowinv      , e_abs          , e_acos         ,
            e_acosh        , e_asin         , e_asinh        , e_atan         ,
            e_atanh        , e_ceil         , e_cos          , e_cosh         ,
            e_exp          , e_expm1        , e_floor        , e_log          ,
            e_log10        , e_log2         , e_log1p        , e_neg          ,
            e_pos          , e_round        , e_sin          , e_sinc         ,
            e_sinh         , e_sqrt         , e_tan          , e_tanh         ,
            e_cot          , e_sec          , e_csc          , e_r2d          ,
            e_d2r          , e_d2g          , e_g2d          , e_notl         ,
            e_sgn          , e_erf          , e_erfc         , e_ncdf         ,
            e_frac         , e_trunc        , e_uvouv        , e_vov          ,
            e_cov          , e_voc          , e_vob          , e_bov          ,
            e_cob          , e_boc          , e_vovov        , e_vovoc        ,
            e_vocov        , e_covov        , e_covoc        , e_vovovov      ,
            e_vovovoc      , e_vovocov      , e_vocovov      , e_covovov      ,
            e_covocov      , e_vocovoc      , e_covovoc      , e_vococov      ,
            e_sf3ext       , e_sf4ext       , e_nulleq       , e_strass       ,
            e_vector       , e_vecelem      , e_rbvecelem    , e_rbveccelem   ,
            e_vecdefass    , e_vecvalass    , e_vecvecass    , e_vecopvalass  ,
            e_vecopvecass  , e_vecfunc      , e_vecvecswap   , e_vecvecineq   ,
            e_vecvalineq   , e_valvecineq   , e_vecvecarith  , e_vecvalarith  ,
            e_valvecarith  , e_vecunaryop   , e_break        , e_continue     ,
            e_swap
         };

         typedef T value_type;
         typedef expression_node<T>* expression_ptr;

         virtual ~expression_node() {}

         // Default evaluation: a bare base node has no value.
         inline virtual T value() const
         {
            return std::numeric_limits<T>::quiet_NaN();
         }

         // Default child access: always a null pointer (index * 0 == 0).
         inline virtual expression_node<T>* branch(const std::size_t& index = 0) const
         {
            return reinterpret_cast<expression_ptr>(index * 0);
         }

         inline virtual node_type type() const
         {
            return e_none;
         }
      };

      template <typename T>
      inline bool is_generally_string_node(const expression_node<T>* node);

      // Scalar truthiness helpers: true iff the value is non-zero.
      inline bool is_true(const double v)
      {
         return std::not_equal_to<double>()(0.0,v);
      }

      inline bool is_true(const long double v)
      {
         return std::not_equal_to<long double>()(0.0L,v);
      }

      inline bool is_true(const float v)
      {
         return std::not_equal_to<float>()(0.0f,v);
      }

      template <typename T>
      inline bool is_true(const std::complex<T>& v)
      {
         return std::not_equal_to<std::complex<T> >()(std::complex<T>(0),v);
      }

      // Evaluates the node and tests the result for non-zero.
      template <typename T>
      inline bool is_true(const expression_node<T>* node)
      {
         return std::not_equal_to<T>()(T(0),node->value());
      }

      // Evaluates the node and tests the result for zero.
      template <typename T>
      inline bool is_false(const expression_node<T>* node)
      {
         return std::equal_to<T>()(T(0),node->value());
      }

      // The following predicates classify a node by its type() tag; all of
      // them treat a null pointer as 'not that kind of node'.
      template <typename T>
      inline bool is_unary_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_unary == node->type());
      }

      template <typename T>
      inline bool is_neg_unary_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_neg == node->type());
      }

      template <typename T>
      inline bool is_binary_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_binary == node->type());
      }

      template <typename T>
      inline bool is_variable_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_variable == node->type());
      }

      // True for any node that can act as an assignable scalar lvalue:
      // plain variables and (rebasable) vector elements.
      template <typename T>
      inline bool is_ivariable_node(const expression_node<T>* node)
      {
         return node &&
                (
                  details::expression_node<T>::e_variable   == node->type() ||
                  details::expression_node<T>::e_vecelem    == node->type() ||
                  details::expression_node<T>::e_rbvecelem  == node->type() ||
                  details::expression_node<T>::e_rbveccelem == node->type()
                );
      }

      template <typename T>
      inline bool is_vector_elem_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_vecelem == node->type());
      }

      template <typename T>
      inline bool is_rebasevector_elem_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_rbvecelem == node->type());
      }

      template <typename T>
      inline bool is_rebasevector_celem_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_rbveccelem == node->type());
      }

      template <typename T>
      inline bool is_vector_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_vector == node->type());
      }

      // True for any node whose evaluation produces a vector result
      // (a vector itself or any vector assignment/arithmetic/unary-op node).
      template <typename T>
      inline bool is_ivector_node(const expression_node<T>* node)
      {
         if (node)
         {
            switch (node->type())
            {
               case details::expression_node<T>::e_vector      :
               case details::expression_node<T>::e_vecvalass   :
               case details::expression_node<T>::e_vecvecass   :
               case details::expression_node<T>::e_vecopvalass :
               case details::expression_node<T>::e_vecopvecass :
               case details::expression_node<T>::e_vecvecswap  :
               case details::expression_node<T>::e_vecvecarith :
               case details::expression_node<T>::e_vecvalarith :
               case details::expression_node<T>::e_valvecarith :
               case details::expression_node<T>::e_vecunaryop  : return true;
               default                                         : return false;
            }
         }
         else
            return false;
      }

      template <typename T>
      inline bool is_constant_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_constant == node->type());
      }

      template <typename T>
      inline bool is_null_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_null == node->type());
      }

      template <typename T>
      inline bool is_break_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_break == node->type());
      }

      template <typename T>
      inline bool is_continue_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_continue == node->type());
      }

      template <typename T>
      inline bool is_swap_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_swap == node->type());
      }

      template <typename T>
      inline bool is_function(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_function == node->type());
      }

      template <typename T>
      inline bool is_return_node(const expression_node<T>* node)
      {
         return node && (details::expression_node<T>::e_return == node->type());
      }

      template <typename T> class unary_node;

      // True only for a unary node whose operation is negation; requires the
      // node already be a unary_node so the static_cast below is safe.
      template <typename T>
      inline bool is_negate_node(const expression_node<T>* node)
      {
         if (node && is_unary_node(node))
         {
            return (details::e_neg == static_cast<const unary_node<T>*>(node)->operation());
         }
         else
            return false;
      }

      // A branch may be deleted by its owner unless it is a variable or
      // string node (those are shared and owned elsewhere).
      template <typename T>
      inline bool branch_deletable(expression_node<T>* node)
      {
         return !is_variable_node(node) &&
                !is_string_node (node) ;
      }
      // True iff every pointer in the fixed-size array is non-null.
      template <std::size_t N, typename T>
      inline bool all_nodes_valid(expression_node<T>* (&b)[N])
      {
         for (std::size_t i = 0; i < N; ++i)
         {
            if (0 == b[i]) return false;
         }
         return true;
      }

      // True iff every pointer in the sequence is non-null.
      template <typename T,
                typename Allocator,
                template <typename,typename> class Sequence>
      inline bool all_nodes_valid(const Sequence<expression_node<T>*,Allocator>& b)
      {
         for (std::size_t i = 0; i < b.size(); ++i)
         {
            if (0 == b[i]) return false;
         }
         return true;
      }

      // True iff every element is a non-null variable node.
      template <std::size_t N, typename T>
      inline bool all_nodes_variables(expression_node<T>* (&b)[N])
      {
         for (std::size_t i = 0; i < N; ++i)
         {
            if (0 == b[i])
               return false;
            else if (!is_variable_node(b[i]))
               return false;
         }
         return true;
      }

      // Sequence overload of the above.
      template <typename T,
                typename Allocator,
                template <typename,typename> class Sequence>
      inline bool all_nodes_variables(Sequence<expression_node<T>*,Allocator>& b)
      {
         for (std::size_t i = 0; i < b.size(); ++i)
         {
            if (0 == b[i])
               return false;
            else if (!is_variable_node(b[i]))
               return false;
         }
         return true;
      }

      // Releases every node in the array back to the allocator.
      template <typename NodeAllocator, typename T, std::size_t N>
      inline void free_all_nodes(NodeAllocator& node_allocator, expression_node<T>* (&b)[N])
      {
         for (std::size_t i = 0; i < N; ++i)
         {
            free_node(node_allocator,b[i]);
         }
      }

      // Releases every node in the sequence, then empties the sequence.
      template <typename NodeAllocator,
                typename T,
                typename Allocator,
                template <typename,typename> class Sequence>
      inline void free_all_nodes(NodeAllocator& node_allocator, Sequence<expression_node<T>*,Allocator>& b)
      {
         for (std::size_t i = 0; i < b.size(); ++i)
         {
            free_node(node_allocator,b[i]);
         }
         b.clear();
      }

      // Returns the node to the allocator and nulls the caller's pointer.
      // Variable and string nodes are never freed here (owned elsewhere).
      // NOTE(review): force_delete == true also takes the early return and
      // suppresses deletion - the name suggests the opposite; confirm against
      // upstream exprtk before relying on this flag.
      template <typename NodeAllocator, typename T>
      inline void free_node(NodeAllocator& node_allocator, expression_node<T>*& node, const bool force_delete = false)
      {
         if (0 != node)
         {
            if (
                 (is_variable_node(node) || is_string_node(node)) ||
                 force_delete
               )
               return;

            node_allocator.free(node);
            node = reinterpret_cast<expression_node<T>*>(0);
         }
      }

      // Plain delete + null of a heap node (no allocator involved).
      // delete on a null pointer is a no-op, so callers need not pre-check.
      template <typename T>
      inline void destroy_node(expression_node<T>*& node)
      {
         delete node;
         node = reinterpret_cast<expression_node<T>*>(0);
      }

      // Type-erased adaptor giving uniform element-pointer access over a raw
      // array, a std::vector-like sequence or an exprtk::vector_view. The
      // concrete impl is placement-new'ed into the fixed 64-byte buffer
      // below, so no separate heap allocation is performed.
      template <typename Type>
      class vector_holder
      {
      private:

         typedef Type value_type;
         typedef value_type* value_ptr;
         typedef const value_ptr const_value_ptr;

         // Abstract interface: indexed element access plus size.
         class vector_holder_base
         {
         public:

            virtual ~vector_holder_base() {}

            inline value_ptr operator[](const std::size_t& index) const
            {
               return value_at(index);
            }

            inline std::size_t size() const
            {
               return vector_size();
            }

            inline value_ptr data() const
            {
               return value_at(0);
            }

            // Only the vector_view backing can be rebased onto new storage.
            virtual inline bool rebaseable() const
            {
               return false;
            }

            virtual void set_ref(value_ptr*) {}

         protected:

            virtual value_ptr value_at(const std::size_t&) const = 0;
            virtual std::size_t vector_size() const = 0;
         };

         // Backing: contiguous raw array of known size.
         class array_vector_impl : public vector_holder_base
         {
         public:

            array_vector_impl(const Type* vec, const std::size_t& vec_size)
            : vec_(vec),
              size_(vec_size)
            {}

         protected:

            // Out-of-range indices yield a null pointer, not UB.
            value_ptr value_at(const std::size_t& index) const
            {
               if (index < size_)
                  return const_cast<const_value_ptr>(vec_ + index);
               else
                  return const_value_ptr(0);
            }

            std::size_t vector_size() const
            {
               return size_;
            }

         private:

            array_vector_impl operator=(const array_vector_impl&);

            const Type* vec_;
            const std::size_t size_;
         };

         // Backing: a std::vector-like sequence held by reference.
         template <typename Allocator,
                   template <typename,typename> class Sequence>
         class sequence_vector_impl : public vector_holder_base
         {
         public:

            typedef Sequence<Type,Allocator> sequence_t;

            sequence_vector_impl(sequence_t& seq)
            : sequence_(seq)
            {}

         protected:

            value_ptr value_at(const std::size_t& index) const
            {
               return (index < sequence_.size()) ? (&sequence_[index]) : const_value_ptr(0);
            }

            std::size_t vector_size() const
            {
               return sequence_.size();
            }

         private:

            sequence_vector_impl operator=(const sequence_vector_impl&);

            sequence_t& sequence_;
         };

         // Backing: an exprtk::vector_view, which supports rebasing.
         class vector_view_impl : public vector_holder_base
         {
         public:

            typedef exprtk::vector_view<Type> vector_view_t;

            vector_view_impl(vector_view_t& vec_view)
            : vec_view_(vec_view)
            {}

            void set_ref(value_ptr* ref)
            {
               vec_view_.set_ref(ref);
            }

            virtual inline bool rebaseable() const
            {
               return true;
            }

         protected:

            value_ptr value_at(const std::size_t& index) const
            {
               return (index < vec_view_.size()) ? (&vec_view_[index]) : const_value_ptr(0);
            }

            std::size_t vector_size() const
            {
               return vec_view_.size();
            }

         private:

            vector_view_impl operator=(const vector_view_impl&);

            vector_view_t& vec_view_;
         };

      public:

         typedef typename details::vec_data_store<Type> vds_t;

         vector_holder(Type* vec, const std::size_t& vec_size)
         : vector_holder_base_(new(buffer)array_vector_impl(vec,vec_size))
         {}

         vector_holder(const vds_t& vds)
         : vector_holder_base_(new(buffer)array_vector_impl(vds.data(),vds.size()))
         {}

         template <typename Allocator>
         vector_holder(std::vector<Type,Allocator>& vec)
         : vector_holder_base_(new(buffer)sequence_vector_impl<Allocator,std::vector>(vec))
         {}

         vector_holder(exprtk::vector_view<Type>& vec)
         : vector_holder_base_(new(buffer)vector_view_impl(vec))
         {}

         inline value_ptr operator[](const std::size_t& index) const
         {
            return (*vector_holder_base_)[index];
         }

         inline std::size_t size() const
         {
            return vector_holder_base_->size();
         }

         inline value_ptr data() const
         {
            return vector_holder_base_->data();
         }

         void set_ref(value_ptr* ref)
         {
            vector_holder_base_->set_ref(ref);
         }

         bool rebaseable() const
         {
            return vector_holder_base_->rebaseable();
         }

      private:

         // NOTE(review): the placement-new'ed impl is never explicitly
         // destroyed; the impls hold only references/pointers, but confirm
         // this is intentional against upstream exprtk.
         mutable vector_holder_base* vector_holder_base_;
         uchar_t buffer[64];
      };

      // A node representing 'null': evaluates to NaN.
      template <typename T>
      class null_node : public expression_node<T>
      {
      public:

         inline T value() const
         {
            return std::numeric_limits<T>::quiet_NaN();
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_null;
         }
      };

      // Tests a branch's value against null (NaN). With equality == true it
      // returns 1 when the value is NaN; with equality == false the inverse.
      template <typename T>
      class null_eq_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         null_eq_node(expression_ptr brnch, const bool equality = true)
         : branch_(brnch),
           branch_deletable_(branch_deletable(branch_)),
           equality_(equality)
         {}

         ~null_eq_node()
         {
            if (branch_ && branch_deletable_)
            {
               destroy_node(branch_);
            }
         }

         inline T value() const
         {
            const T v = branch_->value();
            const bool result = details::numeric::is_nan(v);

            if (result)
               return (equality_) ? T(1) : T(0);
            else
               return (equality_) ? T(0) : T(1);
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_nulleq;
         }

         // NOTE(review): reports e_eq regardless of equality_ - confirm
         // callers do not rely on distinguishing eq/ne here.
         inline operator_type operation() const
         {
            return details::e_eq;
         }

         inline expression_node<T>* branch(const std::size_t&) const
         {
            return branch_;
         }

      private:

         expression_ptr branch_;
         const bool branch_deletable_;
         bool equality_;
      };

      // A compile-time constant; copy/assign are private (non-copyable).
      template <typename T>
      class literal_node : public expression_node<T>
      {
      public:

         explicit literal_node(const T& v)
         : value_(v)
         {}

         inline T value() const
         {
            return value_;
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_constant;
         }

         inline expression_node<T>* branch(const std::size_t&) const
         {
            return reinterpret_cast<expression_node<T>*>(0);
         }

      private:

         literal_node(literal_node<T>&) {}
         literal_node<T>& operator=(literal_node<T>&) { return (*this); }

         const T value_;
      };

      template <typename T> struct range_pack;
      template <typename T> struct range_data_type;

      // Mixin for nodes that expose a (sub)range over a string/vector.
      template <typename T>
      class range_interface
      {
      public:

         typedef range_pack<T> range_t;

         virtual ~range_interface() {}

         virtual range_t& range_ref() = 0;

         virtual const range_t& range_ref() const = 0;
      };

      #ifndef exprtk_disable_string_capabilities
      // Mixin for nodes that expose string contents (value, base ptr, size).
      template <typename T>
      class string_base_node
      {
      public:

         typedef range_data_type<T> range_data_type_t;

         virtual ~string_base_node() {}

         virtual std::string str () const = 0;

         virtual char_cptr base() const = 0;

         virtual std::size_t size() const = 0;
      };

      // A string constant; its numeric value() is NaN, the payload being
      // exposed via the string_base_node / range_interface mixins.
      template <typename T>
      class string_literal_node : public expression_node <T>,
                                  public string_base_node<T>,
                                  public range_interface <T>
      {
      public:

         typedef range_pack<T> range_t;

         // NOTE(review): for an empty string v.size() - 1 wraps to SIZE_MAX;
         // confirm empty literals cannot reach this constructor.
         explicit string_literal_node(const std::string& v)
         : value_(v)
         {
            rp_.n0_c = std::make_pair<bool,std::size_t>(true,0);
            rp_.n1_c = std::make_pair<bool,std::size_t>(true,v.size() - 1);
            rp_.cache.first  = rp_.n0_c.second;
            rp_.cache.second = rp_.n1_c.second;
         }

         inline T value() const
         {
            return std::numeric_limits<T>::quiet_NaN();
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_stringconst;
         }

         inline expression_node<T>* branch(const std::size_t&) const
         {
            return reinterpret_cast<expression_node<T>*>(0);
         }

         std::string str() const
         {
            return value_;
         }

         char_cptr base() const
         {
            return value_.data();
         }

         std::size_t size() const
         {
            return value_.size();
         }

         range_t& range_ref()
         {
            return rp_;
         }

         const range_t& range_ref() const
         {
            return rp_;
         }

      private:

         string_literal_node(const string_literal_node<T>&);
         string_literal_node<T>& operator=(const string_literal_node<T>&);

         const std::string value_;
         range_t rp_;
      };
      #endif

      // Applies a unary operation to the value of its single branch.
      template <typename T>
      class unary_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         unary_node(const operator_type& opr, expression_ptr brnch)
         : operation_(opr),
           branch_(brnch),
           branch_deletable_(branch_deletable(branch_))
         {}

         ~unary_node()
         {
            if (branch_ && branch_deletable_)
            {
               destroy_node(branch_);
            }
         }

         inline T value() const
         {
            const T arg = branch_->value();
            return numeric::process<T>(operation_,arg);
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_unary;
         }

         inline operator_type operation() const
         {
            return operation_;
         }

         inline expression_node<T>* branch(const std::size_t&) const
         {
            return branch_;
         }

         // Relinquish ownership: the destructor will no longer delete the
         // branch after this is called.
         inline void release()
         {
            branch_deletable_ = false;
         }

      protected:

         operator_type operation_;
         expression_ptr branch_;
         bool branch_deletable_;
      };
      // Compile-time guarded writer for branch-pair slot D; the generic
      // (B == false) case is a no-op used when D is out of range for N.
      template <typename T, std::size_t D, bool B>
      struct construct_branch_pair
      {
         template <std::size_t N>
         static inline void process(std::pair<expression_node<T>*,bool> (&)[N], expression_node<T>*)
         {}
      };

      // Active (B == true) case: store the branch together with its
      // deletability flag.
      template <typename T, std::size_t D>
      struct construct_branch_pair<T,D,true>
      {
         template <std::size_t N>
         static inline void process(std::pair<expression_node<T>*,bool> (&branch)[N], expression_node<T>* b)
         {
            if (b)
            {
               branch[D] = std::make_pair(b,branch_deletable(b));
            }
         }
      };

      // Populates up to N (max 10) branch slots; unused trailing arguments
      // default to null and their slots are compile-time no-ops.
      template <std::size_t N, typename T>
      inline void init_branches(std::pair<expression_node<T>*,bool> (&branch)[N],
                                expression_node<T>* b0,
                                expression_node<T>* b1 = reinterpret_cast<expression_node<T>*>(0),
                                expression_node<T>* b2 = reinterpret_cast<expression_node<T>*>(0),
                                expression_node<T>* b3 = reinterpret_cast<expression_node<T>*>(0),
                                expression_node<T>* b4 = reinterpret_cast<expression_node<T>*>(0),
                                expression_node<T>* b5 = reinterpret_cast<expression_node<T>*>(0),
                                expression_node<T>* b6 = reinterpret_cast<expression_node<T>*>(0),
                                expression_node<T>* b7 = reinterpret_cast<expression_node<T>*>(0),
                                expression_node<T>* b8 = reinterpret_cast<expression_node<T>*>(0),
                                expression_node<T>* b9 = reinterpret_cast<expression_node<T>*>(0))
      {
         construct_branch_pair<T,0,(N > 0)>::process(branch,b0);
         construct_branch_pair<T,1,(N > 1)>::process(branch,b1);
         construct_branch_pair<T,2,(N > 2)>::process(branch,b2);
         construct_branch_pair<T,3,(N > 3)>::process(branch,b3);
         construct_branch_pair<T,4,(N > 4)>::process(branch,b4);
         construct_branch_pair<T,5,(N > 5)>::process(branch,b5);
         construct_branch_pair<T,6,(N > 6)>::process(branch,b6);
         construct_branch_pair<T,7,(N > 7)>::process(branch,b7);
         construct_branch_pair<T,8,(N > 8)>::process(branch,b8);
         construct_branch_pair<T,9,(N > 9)>::process(branch,b9);
      }

      // Destroys every stored branch whose deletable flag is set.
      struct cleanup_branches
      {
         template <typename T, std::size_t N>
         static inline void execute(std::pair<expression_node<T>*,bool> (&branch)[N])
         {
            for (std::size_t i = 0; i < N; ++i)
            {
               if (branch[i].first && branch[i].second)
               {
                  destroy_node(branch[i].first);
               }
            }
         }

         template <typename T,
                   typename Allocator,
                   template <typename,typename> class Sequence>
         static inline void execute(Sequence<std::pair<expression_node<T>*,bool>,Allocator>& branch)
         {
            for (std::size_t i = 0; i < branch.size(); ++i)
            {
               if (branch[i].first && branch[i].second)
               {
                  destroy_node(branch[i].first);
               }
            }
         }
      };

      // Applies a runtime-selected binary operation to two branches.
      template <typename T>
      class binary_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;
         typedef std::pair<expression_ptr,bool> branch_t;

         binary_node(const operator_type& opr,
                     expression_ptr branch0,
                     expression_ptr branch1)
         : operation_(opr)
         {
            init_branches<2>(branch_, branch0, branch1);
         }

         ~binary_node()
         {
            cleanup_branches::execute<T,2>(branch_);
         }

         inline T value() const
         {
            const T arg0 = branch_[0].first->value();
            const T arg1 = branch_[1].first->value();

            return numeric::process<T>(operation_,arg0,arg1);
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_binary;
         }

         inline operator_type operation()
         {
            return operation_;
         }

         inline expression_node<T>* branch(const std::size_t& index = 0) const
         {
            if (0 == index)
               return branch_[0].first;
            else if (1 == index)
               return branch_[1].first;
            else
               return reinterpret_cast<expression_ptr>(0);
         }

      protected:

         operator_type operation_;
         branch_t branch_[2];
      };

      // Binary node whose operation is fixed at compile time via the
      // Operation policy type (avoids the runtime operator dispatch).
      template <typename T, typename Operation>
      class binary_ext_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;
         typedef std::pair<expression_ptr,bool> branch_t;

         binary_ext_node(expression_ptr branch0, expression_ptr branch1)
         {
            init_branches<2>(branch_, branch0, branch1);
         }

         ~binary_ext_node()
         {
            cleanup_branches::execute<T,2>(branch_);
         }

         inline T value() const
         {
            const T arg0 = branch_[0].first->value();
            const T arg1 = branch_[1].first->value();

            return Operation::process(arg0,arg1);
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_binary_ext;
         }

         inline operator_type operation()
         {
            return Operation::operation();
         }

         inline expression_node<T>* branch(const std::size_t& index = 0) const
         {
            if (0 == index)
               return branch_[0].first;
            else if (1 == index)
               return branch_[1].first;
            else
               return reinterpret_cast<expression_ptr>(0);
         }

      protected:

         branch_t branch_[2];
      };

      // Three-branch node implementing inrange/clamp/iclamp.
      template <typename T>
      class trinary_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;
         typedef std::pair<expression_ptr,bool> branch_t;

         trinary_node(const operator_type& opr,
                      expression_ptr branch0,
                      expression_ptr branch1,
                      expression_ptr branch2)
         : operation_(opr)
         {
            init_branches<3>(branch_, branch0, branch1, branch2);
         }

         ~trinary_node()
         {
            cleanup_branches::execute<T,3>(branch_);
         }

         inline T value() const
         {
            const T arg0 = branch_[0].first->value();
            const T arg1 = branch_[1].first->value();
            const T arg2 = branch_[2].first->value();

            switch (operation_)
            {
               // inrange(lo,x,hi): 1 when lo <= x <= hi, else 0.
               case e_inrange : return (arg1 < arg0) ? T(0) : ((arg1 > arg2) ? T(0) : T(1));

               // clamp(lo,x,hi): constrain x to [lo,hi].
               case e_clamp   : return (arg1 < arg0) ? arg0 : (arg1 > arg2 ? arg2 : arg1);

               // iclamp(lo,x,hi): x unchanged outside [lo,hi]; inside, snap
               // to the nearer bound.
               case e_iclamp  : if ((arg1 <= arg0) || (arg1 >= arg2))
                                   return arg1;
                                else
                                   return ((T(2) * arg1 <= (arg2 + arg0)) ? arg0 : arg2);

               default        : exprtk_debug(("trinary_node::value() - Error: Invalid operation\n"));
                                return std::numeric_limits<T>::quiet_NaN();
            }
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_trinary;
         }

      protected:

         operator_type operation_;
         branch_t branch_[3];
      };

      // Four-branch node; base value() is NaN (specialised elsewhere).
      template <typename T>
      class quaternary_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;
         typedef std::pair<expression_ptr,bool> branch_t;

         quaternary_node(const operator_type& opr,
                         expression_ptr branch0,
                         expression_ptr branch1,
                         expression_ptr branch2,
                         expression_ptr branch3)
         : operation_(opr)
         {
            init_branches<4>(branch_, branch0, branch1, branch2, branch3);
         }

         ~quaternary_node()
         {
            cleanup_branches::execute<T,4>(branch_);
         }

         inline T value() const
         {
            return std::numeric_limits<T>::quiet_NaN();
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_quaternary;
         }

      protected:

         operator_type operation_;
         branch_t branch_[4];
      };

      // if-then-else: evaluates consequent when test is non-zero, else the
      // alternative.
      template <typename T>
      class conditional_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         conditional_node(expression_ptr test,
                          expression_ptr consequent,
                          expression_ptr alternative)
         : test_(test),
           consequent_(consequent),
           alternative_(alternative),
           test_deletable_(branch_deletable(test_)),
           consequent_deletable_(branch_deletable(consequent_)),
           alternative_deletable_(branch_deletable(alternative_))
         {}

         ~conditional_node()
         {
            if (test_ && test_deletable_)
            {
               destroy_node(test_);
            }

            if (consequent_ && consequent_deletable_ )
            {
               destroy_node(consequent_);
            }

            if (alternative_ && alternative_deletable_)
            {
               destroy_node(alternative_);
            }
         }

         inline T value() const
         {
            if (is_true(test_))
               return consequent_->value();
            else
               return alternative_->value();
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_conditional;
         }

      private:

         expression_ptr test_;
         expression_ptr consequent_;
         expression_ptr alternative_;
         const bool
test_deletable_;
         const bool consequent_deletable_;
         const bool alternative_deletable_;
      };

      template <typename T>
      class cons_conditional_node : public expression_node<T>
      {
      public:

         // Consequent only conditional statement node
         // (an 'if' with no 'else'; yields NaN when the test is false).
         typedef expression_node<T>* expression_ptr;

         cons_conditional_node(expression_ptr test,
                               expression_ptr consequent)
         : test_(test),
           consequent_(consequent),
           test_deletable_(branch_deletable(test_)),
           consequent_deletable_(branch_deletable(consequent_))
         {}

         ~cons_conditional_node()
         {
            if (test_ && test_deletable_)
            {
               destroy_node(test_);
            }

            if (consequent_ && consequent_deletable_)
            {
               destroy_node(consequent_);
            }
         }

         inline T value() const
         {
            if (is_true(test_))
               return consequent_->value();
            else
               return std::numeric_limits<T>::quiet_NaN();
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_conditional;
         }

      private:

         expression_ptr test_;
         expression_ptr consequent_;
         const bool test_deletable_;
         const bool consequent_deletable_;
      };

      #ifndef exprtk_disable_break_continue
      // Thrown by break_node; carries the value of the optional break
      // expression up to the enclosing *_bc loop node.
      template <typename T>
      class break_exception
      {
      public:

         break_exception(const T& v)
         : value(v)
         {}

         T value;
      };

      // Thrown by continue_node; caught by the enclosing *_bc loop node.
      class continue_exception
      {};

      template <typename T>
      class break_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         break_node(expression_ptr ret = expression_ptr(0))
         : return_(ret),
           return_deletable_(branch_deletable(return_))
         {}

         ~break_node()
         {
            // destroy_node on a null return_ is a harmless no-op.
            if (return_deletable_)
            {
               destroy_node(return_);
            }
         }

         inline T value() const
         {
            throw break_exception<T>(return_ ? return_->value() : std::numeric_limits<T>::quiet_NaN());
            // Unreachable; kept for compilers that require a return here.
            #ifndef _MSC_VER
            return std::numeric_limits<T>::quiet_NaN();
            #endif
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_break;
         }

      private:

         expression_ptr return_;
         const bool return_deletable_;
      };

      template <typename T>
      class continue_node : public expression_node<T>
      {
      public:

         inline T value() const
         {
            throw continue_exception();
            #ifndef _MSC_VER
            return std::numeric_limits<T>::quiet_NaN();
            #endif
         }

         // NOTE(review): reports e_break rather than e_continue, so
         // is_continue_node() never matches this node - confirm against
         // upstream exprtk whether this is intentional.
         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_break;
         }
      };
      #endif

      // while-loop; value is that of the last body evaluation (0 if the
      // body never runs).
      template <typename T>
      class while_loop_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         while_loop_node(expression_ptr condition, expression_ptr loop_body)
         : condition_(condition),
           loop_body_(loop_body),
           condition_deletable_(branch_deletable(condition_)),
           loop_body_deletable_(branch_deletable(loop_body_))
         {}

         ~while_loop_node()
         {
            if (condition_ && condition_deletable_)
            {
               destroy_node(condition_);
            }

            if (loop_body_ && loop_body_deletable_)
            {
               destroy_node(loop_body_);
            }
         }

         inline T value() const
         {
            T result = T(0);

            while (is_true(condition_))
            {
               result = loop_body_->value();
            }

            return result;
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_while;
         }

      private:

         expression_ptr condition_;
         expression_ptr loop_body_;
         const bool condition_deletable_;
         const bool loop_body_deletable_;
      };

      // repeat-until: body always runs at least once; loop exits when the
      // condition becomes true.
      template <typename T>
      class repeat_until_loop_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         repeat_until_loop_node(expression_ptr condition, expression_ptr loop_body)
         : condition_(condition),
           loop_body_(loop_body),
           condition_deletable_(branch_deletable(condition_)),
           loop_body_deletable_(branch_deletable(loop_body_))
         {}

         ~repeat_until_loop_node()
         {
            if (condition_ && condition_deletable_)
            {
               destroy_node(condition_);
            }

            if (loop_body_ && loop_body_deletable_)
            {
               destroy_node(loop_body_);
            }
         }

         inline T value() const
         {
            T result = T(0);

            do
            {
               result = loop_body_->value();
            }
            while (is_false(condition_));

            return result;
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_repeat;
         }

      private:

         expression_ptr condition_;
         expression_ptr loop_body_;
         const bool condition_deletable_;
         const bool loop_body_deletable_;
      };

      // C-style for-loop; initialiser and incrementor are optional (null).
      template <typename T>
      class for_loop_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         for_loop_node(expression_ptr initialiser,
                       expression_ptr condition,
                       expression_ptr incrementor,
                       expression_ptr loop_body)
         : initialiser_(initialiser),
           condition_ (condition ),
           incrementor_(incrementor),
           loop_body_ (loop_body ),
           initialiser_deletable_(branch_deletable(initialiser_)),
           condition_deletable_ (branch_deletable(condition_ )),
           incrementor_deletable_(branch_deletable(incrementor_)),
           loop_body_deletable_ (branch_deletable(loop_body_ ))
         {}

         ~for_loop_node()
         {
            if (initialiser_ && initialiser_deletable_)
            {
               destroy_node(initialiser_);
            }

            if (condition_ && condition_deletable_)
            {
               destroy_node(condition_);
            }

            if (incrementor_ && incrementor_deletable_)
            {
               destroy_node(incrementor_);
            }

            if (loop_body_ && loop_body_deletable_)
            {
               destroy_node(loop_body_);
            }
         }

         inline T value() const
         {
            T result = T(0);

            if (initialiser_)
               initialiser_->value();

            if (incrementor_)
            {
               while (is_true(condition_))
               {
                  result = loop_body_->value();
                  incrementor_->value();
               }
            }
            else
            {
               while (is_true(condition_))
               {
                  result = loop_body_->value();
               }
            }

            return result;
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_for;
         }

      private:

         expression_ptr initialiser_ ;
         expression_ptr condition_ ;
         expression_ptr incrementor_ ;
         expression_ptr loop_body_ ;
         const bool initialiser_deletable_;
         const bool condition_deletable_ ;
         const bool incrementor_deletable_;
         const bool loop_body_deletable_ ;
      };

      #ifndef exprtk_disable_break_continue
      // while-loop variant that honours break/continue via the exception
      // mechanism above ('bc' = break/continue).
      template <typename T>
      class while_loop_bc_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         while_loop_bc_node(expression_ptr condition, expression_ptr loop_body)
         : condition_(condition),
           loop_body_(loop_body),
           condition_deletable_(branch_deletable(condition_)),
           loop_body_deletable_(branch_deletable(loop_body_))
         {}

         ~while_loop_bc_node()
         {
            if (condition_ && condition_deletable_)
            {
               destroy_node(condition_);
            }

            if (loop_body_ && loop_body_deletable_)
            {
               destroy_node(loop_body_);
            }
         }

         inline T value() const
         {
            T result = T(0);

            while (is_true(condition_))
            {
               try
               {
                  result = loop_body_->value();
               }
               catch(const break_exception<T>& e)
               {
                  return e.value;
               }
               catch(const continue_exception&)
               {}
            }

            return result;
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_while;
         }

      private:

         expression_ptr condition_;
         expression_ptr loop_body_;
         const bool condition_deletable_;
         const bool loop_body_deletable_;
      };

      // repeat-until variant honouring break/continue.
      template <typename T>
      class repeat_until_loop_bc_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         repeat_until_loop_bc_node(expression_ptr condition, expression_ptr loop_body)
         : condition_(condition),
           loop_body_(loop_body),
           condition_deletable_(branch_deletable(condition_)),
           loop_body_deletable_(branch_deletable(loop_body_))
         {}

         ~repeat_until_loop_bc_node()
         {
            if (condition_ && condition_deletable_)
            {
               destroy_node(condition_);
            }

            if (loop_body_ && loop_body_deletable_)
            {
               destroy_node(loop_body_);
            }
         }

         inline T value() const
         {
            T result = T(0);

            do
            {
               try
               {
                  result = loop_body_->value();
               }
               catch(const break_exception<T>& e)
               {
                  return e.value;
               }
               catch(const continue_exception&)
               {}
            }
            while (is_false(condition_));

            return result;
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_repeat;
         }

      private:

         expression_ptr condition_;
         expression_ptr loop_body_;
         const bool condition_deletable_;
         const bool loop_body_deletable_;
      };

      // for-loop variant honouring break/continue; note continue still runs
      // the incrementor before the next condition test.
      template <typename T>
      class for_loop_bc_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         for_loop_bc_node(expression_ptr initialiser,
                          expression_ptr condition,
                          expression_ptr incrementor,
                          expression_ptr loop_body)
         : initialiser_(initialiser),
           condition_ (condition ),
           incrementor_(incrementor),
           loop_body_ (loop_body ),
           initialiser_deletable_(branch_deletable(initialiser_)),
           condition_deletable_ (branch_deletable(condition_ )),
           incrementor_deletable_(branch_deletable(incrementor_)),
           loop_body_deletable_ (branch_deletable(loop_body_ ))
         {}

         ~for_loop_bc_node()
         {
            if (initialiser_ && initialiser_deletable_)
            {
               destroy_node(initialiser_);
            }

            if (condition_ && condition_deletable_)
            {
               destroy_node(condition_);
            }

            if (incrementor_ && incrementor_deletable_)
            {
               destroy_node(incrementor_);
            }

            if (loop_body_ && loop_body_deletable_)
            {
               destroy_node(loop_body_);
            }
         }

         inline T value() const
         {
            T result = T(0);

            if (initialiser_)
               initialiser_->value();

            if (incrementor_)
            {
               while (is_true(condition_))
               {
                  try
                  {
                     result = loop_body_->value();
                  }
                  catch(const break_exception<T>& e)
                  {
                     return e.value;
                  }
                  catch(const continue_exception&)
                  {}

                  incrementor_->value();
               }
            }
            else
            {
               while (is_true(condition_))
               {
                  try
                  {
                     result = loop_body_->value();
                  }
                  catch(const break_exception<T>& e)
                  {
                     return e.value;
                  }
                  catch(const continue_exception&)
                  {}
               }
            }

            return result;
         }

         inline typename expression_node<T>::node_type type() const
         {
            return expression_node<T>::e_for;
         }

      private:

         expression_ptr initialiser_;
         expression_ptr condition_ ;
         expression_ptr incrementor_;
         expression_ptr loop_body_ ;
         const bool initialiser_deletable_;
         const bool condition_deletable_ ;
         const bool incrementor_deletable_;
         const bool loop_body_deletable_ ;
      };
      #endif

      // switch-statement node. arg_list holds condition/consequent pairs
      // followed by one trailing default expression, hence the odd-size
      // requirement checked below.
      template <typename T>
      class switch_node : public expression_node<T>
      {
      public:

         typedef expression_node<T>* expression_ptr;

         template <typename Allocator,
                   template <typename,typename> class Sequence>
         switch_node(const Sequence<expression_ptr,Allocator>& arg_list)
         {
            if (1 != (arg_list.size() & 1))
               return;

            arg_list_.resize(arg_list.size());
delete_branch_.resize(arg_list.size()); for (std::size_t i = 0; i < arg_list.size(); ++i) { if (arg_list[i]) { arg_list_[i] = arg_list[i]; delete_branch_[i] = static_cast<unsigned char>(branch_deletable(arg_list_[i]) ? 1 : 0); } else { arg_list_.clear(); delete_branch_.clear(); return; } } } ~switch_node() { for (std::size_t i = 0; i < arg_list_.size(); ++i) { if (arg_list_[i] && delete_branch_[i]) { destroy_node(arg_list_[i]); } } } inline T value() const { if (!arg_list_.empty()) { const std::size_t upper_bound = (arg_list_.size() - 1); for (std::size_t i = 0; i < upper_bound; i += 2) { expression_ptr condition = arg_list_[i ]; expression_ptr consequent = arg_list_[i + 1]; if (is_true(condition)) { return consequent->value(); } } return arg_list_[upper_bound]->value(); } else return std::numeric_limits<T>::quiet_NaN(); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_switch; } protected: std::vector<expression_ptr> arg_list_; std::vector<unsigned char> delete_branch_; }; template <typename T, typename Switch_N> class switch_n_node : public switch_node<T> { public: typedef expression_node<T>* expression_ptr; template <typename Allocator, template <typename,typename> class Sequence> switch_n_node(const Sequence<expression_ptr,Allocator>& arg_list) : switch_node<T>(arg_list) {} inline T value() const { return Switch_N::process(switch_node<T>::arg_list_); } }; template <typename T> class multi_switch_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; template <typename Allocator, template <typename,typename> class Sequence> multi_switch_node(const Sequence<expression_ptr,Allocator>& arg_list) { if (0 != (arg_list.size() & 1)) return; arg_list_.resize(arg_list.size()); delete_branch_.resize(arg_list.size()); for (std::size_t i = 0; i < arg_list.size(); ++i) { if (arg_list[i]) { arg_list_[i] = arg_list[i]; delete_branch_[i] = static_cast<unsigned char>(branch_deletable(arg_list_[i]) ? 
            1 : 0);
         }
         else
         {
            arg_list_.clear();
            delete_branch_.clear();
            return;
         }
      }
   }

   ~multi_switch_node()
   {
      for (std::size_t i = 0; i < arg_list_.size(); ++i)
      {
         if (arg_list_[i] && delete_branch_[i])
         {
            destroy_node(arg_list_[i]);
         }
      }
   }

   // Evaluates every pair; each true condition overwrites 'result' with
   // its consequent's value, so the last matching pair wins.
   inline T value() const
   {
      T result = T(0);

      if (arg_list_.empty())
      {
         return std::numeric_limits<T>::quiet_NaN();
      }

      const std::size_t upper_bound = (arg_list_.size() - 1);

      for (std::size_t i = 0; i < upper_bound; i += 2)
      {
         expression_ptr condition  = arg_list_[i    ];
         expression_ptr consequent = arg_list_[i + 1];

         if (is_true(condition))
         {
            result = consequent->value();
         }
      }

      return result;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_mswitch;
   }

private:

   std::vector<expression_ptr> arg_list_;
   std::vector<unsigned char> delete_branch_;
};

// Interface for any node exposing a mutable reference to a value
// (plain variables, vector elements, ...).
template <typename T>
class ivariable
{
public:

   virtual ~ivariable() {}

   virtual T& ref() = 0;
   virtual const T& ref() const = 0;
};

// Leaf node bound to an external scalar variable; the default ctor
// binds to a shared static NaN 'null_value'.
template <typename T>
class variable_node : public expression_node<T>,
                      public ivariable <T>
{
public:

   static T null_value;

   explicit variable_node()
   : value_(&null_value)
   {}

   variable_node(T& v)
   : value_(&v)
   {}

   // Identity (address) based ordering.
   inline bool operator <(const variable_node<T>& v) const
   {
      return this < (&v);
   }

   inline T value() const
   {
      return (*value_);
   }

   inline T& ref()
   {
      return (*value_);
   }

   inline const T& ref() const
   {
      return (*value_);
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_variable;
   }

private:

   T* value_;
};

template <typename T>
T variable_node<T>::null_value = T(std::numeric_limits<T>::quiet_NaN());

// Describes a [r0:r1] range whose bounds may each be either a constant
// (n0_c/n1_c) or a runtime expression (n0_e/n1_e); the last resolved
// bounds are kept in 'cache'.
template <typename T>
struct range_pack
{
   typedef expression_node<T>* expression_node_ptr;
   typedef std::pair<std::size_t,std::size_t> cached_range_t;

   range_pack()
   : n0_e (std::make_pair(false,expression_node_ptr(0))),
     n1_e (std::make_pair(false,expression_node_ptr(0))),
     n0_c (std::make_pair(false,0)),
     n1_c (std::make_pair(false,0)),
     cache(std::make_pair(0,0))
   {}

   void clear()
   {
      n0_e =
      std::make_pair(false,expression_node_ptr(0));
      n1_e  = std::make_pair(false,expression_node_ptr(0));
      n0_c  = std::make_pair(false,0);
      n1_c  = std::make_pair(false,0);
      cache = std::make_pair(0,0);
   }

   // Releases owned bound expressions; variable/string nodes are not
   // owned by the range and are left alone.
   void free()
   {
      if (n0_e.first && n0_e.second)
      {
         n0_e.first = false;

         if (
              !is_variable_node(n0_e.second) &&
              !is_string_node  (n0_e.second)
            )
         {
            destroy_node(n0_e.second);
         }
      }

      if (n1_e.first && n1_e.second)
      {
         n1_e.first = false;

         if (
              !is_variable_node(n1_e.second) &&
              !is_string_node  (n1_e.second)
            )
         {
            destroy_node(n1_e.second);
         }
      }
   }

   bool const_range()
   {
      return ( n0_c.first &&  n1_c.first) &&
             (!n0_e.first && !n1_e.first);
   }

   bool var_range()
   {
      return ( n0_e.first &&  n1_e.first) &&
             (!n0_c.first && !n1_c.first);
   }

   // Resolves the concrete bounds into r0/r1. Returns false for a
   // negative runtime bound or an unset side. A max-size_t upper bound
   // is treated as "to the end" (size - 1). Updates 'cache' and returns
   // whether the resolved range is non-empty (r0 <= r1).
   bool operator() (std::size_t& r0, std::size_t& r1,
                    const std::size_t& size = std::numeric_limits<std::size_t>::max()) const
   {
      if (n0_c.first)
         r0 = n0_c.second;
      else if (n0_e.first)
      {
         T r0_value = n0_e.second->value();

         if (r0_value < 0)
            return false;
         else
            r0 = static_cast<std::size_t>(details::numeric::to_int64(r0_value));
      }
      else
         return false;

      if (n1_c.first)
         r1 = n1_c.second;
      else if (n1_e.first)
      {
         T r1_value = n1_e.second->value();

         if (r1_value < 0)
            return false;
         else
            r1 = static_cast<std::size_t>(details::numeric::to_int64(r1_value));
      }
      else
         return false;

      if (
           (std::numeric_limits<std::size_t>::max() != size) &&
           (std::numeric_limits<std::size_t>::max() == r1  )
         )
      {
         r1 = size - 1;
      }

      cache.first  = r0;
      cache.second = r1;

      return (r0 <= r1);
   }

   inline std::size_t const_size() const
   {
      return (n1_c.second - n0_c.second + 1);
   }

   inline std::size_t cache_size() const
   {
      return (cache.second - cache.first + 1);
   }

   std::pair<bool,expression_node_ptr> n0_e;
   std::pair<bool,expression_node_ptr> n1_e;
   std::pair<bool,std::size_t        > n0_c;
   std::pair<bool,std::size_t        > n1_c;
   mutable cached_range_t cache;
};

template <typename T>
class string_base_node;

// Aggregate used when passing ranged string data around: the range,
// the raw data pointer, element counts and the owning string node.
template <typename T>
struct range_data_type
{
   typedef range_pack<T> range_t;
   typedef string_base_node<T>* strbase_ptr_t;

   range_data_type()
   :
     range    (0),
     data     (0),
     size     (0),
     type_size(0),
     str_node (0)
   {}

   range_t*      range;
   void*         data;
   std::size_t   size;
   std::size_t   type_size;
   strbase_ptr_t str_node;
};

template <typename T>
class vector_node;

// Interface shared by all nodes that behave as a vector: exposes the
// underlying vector_node, its data store (vds) and its size.
template <typename T>
class vector_interface
{
public:

   typedef vector_node<T>*   vector_node_ptr;
   typedef vec_data_store<T> vds_t;

   virtual ~vector_interface() {}

   virtual std::size_t size() const = 0;

   virtual vector_node_ptr vec() const = 0;

   virtual vector_node_ptr vec() = 0;

   virtual vds_t& vds() = 0;

   virtual const vds_t& vds() const = 0;

   virtual bool side_effect() const { return false; }
};

// Node representing a whole vector; value() returns element 0.
template <typename T>
class vector_node : public expression_node <T>,
                    public vector_interface<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef vector_holder<T>    vector_holder_t;
   typedef vector_node<T>*     vector_node_ptr;
   typedef vec_data_store<T>   vds_t;

   vector_node(vector_holder_t* vh)
   : vector_holder_(vh),
     vds_((*vector_holder_).size(),(*vector_holder_)[0])
   {
      // Point the holder at this node's data store reference.
      vector_holder_->set_ref(&vds_.ref());
   }

   vector_node(const vds_t& vds, vector_holder_t* vh)
   : vector_holder_(vh),
     vds_(vds)
   {}

   inline T value() const
   {
      return vds().data()[0];
   }

   vector_node_ptr vec() const
   {
      return const_cast<vector_node_ptr>(this);
   }

   vector_node_ptr vec()
   {
      return this;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_vector;
   }

   std::size_t size() const
   {
      return vds().size();
   }

   vds_t& vds()
   {
      return vds_;
   }

   const vds_t& vds() const
   {
      return vds_;
   }

   inline vector_holder_t& vec_holder()
   {
      return (*vector_holder_);
   }

private:

   vector_holder_t* vector_holder_;
   vds_t            vds_;
};

// Accesses element [index] of a vector through a base pointer cached
// at construction; the index sub-expression is re-evaluated per access.
template <typename T>
class vector_elem_node : public expression_node<T>,
                         public ivariable      <T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef vector_holder<T>    vector_holder_t;
   typedef vector_holder_t*    vector_holder_ptr;

   vector_elem_node(expression_ptr index, vector_holder_ptr vec_holder)
   : index_(index),
     vec_holder_(vec_holder),
     vector_base_((*vec_holder)[0]),
     index_deletable_(branch_deletable(index_))
   {}

   ~vector_elem_node()
   {
      if (index_ && index_deletable_)
      {
         destroy_node(index_);
      }
   }

   // NOTE(review): the runtime index is not bounds-checked here.
   inline T value() const
   {
      return *(vector_base_ + static_cast<std::size_t>(details::numeric::to_int64(index_->value())));
   }

   inline T& ref()
   {
      return *(vector_base_ + static_cast<std::size_t>(details::numeric::to_int64(index_->value())));
   }

   inline const T& ref() const
   {
      return *(vector_base_ + static_cast<std::size_t>(details::numeric::to_int64(index_->value())));
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_vecelem;
   }

   inline vector_holder_t& vec_holder()
   {
      return (*vec_holder_);
   }

private:

   expression_ptr    index_;
   vector_holder_ptr vec_holder_;
   T* vector_base_;
   const bool index_deletable_;
};

// Like vector_elem_node but resolves elements through a rebasable
// vec_data_store instead of a fixed base pointer, so the underlying
// storage may be re-pointed after construction.
template <typename T>
class rebasevector_elem_node : public expression_node<T>,
                               public ivariable      <T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef vector_holder<T>    vector_holder_t;
   typedef vector_holder_t*    vector_holder_ptr;
   typedef vec_data_store<T>   vds_t;

   rebasevector_elem_node(expression_ptr index, vector_holder_ptr vec_holder)
   : index_(index),
     index_deletable_(branch_deletable(index_)),
     vector_holder_(vec_holder),
     vds_((*vector_holder_).size(),(*vector_holder_)[0])
   {
      vector_holder_->set_ref(&vds_.ref());
   }

   ~rebasevector_elem_node()
   {
      if (index_ && index_deletable_)
      {
         destroy_node(index_);
      }
   }

   inline T value() const
   {
      return *(vds_.data() + static_cast<std::size_t>(details::numeric::to_int64(index_->value())));
   }

   inline T& ref()
   {
      return *(vds_.data() + static_cast<std::size_t>(details::numeric::to_int64(index_->value())));
   }

   inline const T& ref() const
   {
      return *(vds_.data() + static_cast<std::size_t>(details::numeric::to_int64(index_->value())));
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_rbvecelem;
   }

   inline vector_holder_t& vec_holder()
   {
      return (*vector_holder_);
   }

private:

   expression_ptr index_;
   const bool     index_deletable_;
   vector_holder_ptr vector_holder_;
   vds_t             vds_;
};

// Constant-index variant of rebasevector_elem_node: the element offset
// is fixed at construction, so there is no index expression to
// evaluate or own.
template <typename T>
class rebasevector_celem_node : public expression_node<T>,
                                public ivariable      <T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef vector_holder<T>    vector_holder_t;
   typedef vector_holder_t*    vector_holder_ptr;
   typedef vec_data_store<T>   vds_t;

   rebasevector_celem_node(const std::size_t index, vector_holder_ptr vec_holder)
   : index_(index),
     vector_holder_(vec_holder),
     vds_((*vector_holder_).size(),(*vector_holder_)[0])
   {
      vector_holder_->set_ref(&vds_.ref());
   }

   inline T value() const
   {
      return *(vds_.data() + index_);
   }

   inline T& ref()
   {
      return *(vds_.data() + index_);
   }

   inline const T& ref() const
   {
      return *(vds_.data() + index_);
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_rbveccelem;
   }

   inline vector_holder_t& vec_holder()
   {
      return (*vector_holder_);
   }

private:

   const std::size_t index_;
   vector_holder_ptr vector_holder_;
   vds_t             vds_;
};

// Initialises a raw vector from an initialiser list. Either a single
// value is broadcast over the whole vector, or the list is copied and
// any remaining tail elements are zero-filled.
template <typename T>
class vector_assignment_node : public expression_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;

   vector_assignment_node(T* vector_base,
                          const std::size_t& size,
                          const std::vector<expression_ptr>& initialiser_list,
                          const bool single_value_initialse)
   : vector_base_(vector_base),
     initialiser_list_(initialiser_list),
     size_(size),
     single_value_initialse_(single_value_initialse)
   {}

   ~vector_assignment_node()
   {
      for (std::size_t i = 0; i < initialiser_list_.size(); ++i)
      {
         if (branch_deletable(initialiser_list_[i]))
         {
            destroy_node(initialiser_list_[i]);
         }
      }
   }

   inline T value() const
   {
      if (single_value_initialse_)
      {
         // Broadcast the sole initialiser across every element.
         for (std::size_t i = 0; i < size_; ++i)
         {
            *(vector_base_ + i) = initialiser_list_[0]->value();
         }
      }
      else
      {
         std::size_t il_size = initialiser_list_.size();

         for (std::size_t i = 0; i < il_size; ++i)
         {
            *(vector_base_ + i) = initialiser_list_[i]->value();
         }

         // Zero-fill any elements not covered by the list.
         if (il_size < size_)
         {
            for (std::size_t i = il_size; i < size_; ++i)
            {
               *(vector_base_ + i) = T(0);
            }
         }
      }

      return
      *(vector_base_);
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_vecdefass;
   }

private:

   vector_assignment_node<T>& operator=(const vector_assignment_node<T>&);

   mutable T* vector_base_;
   std::vector<expression_ptr> initialiser_list_;
   const std::size_t size_;
   const bool single_value_initialse_;
};

// Swaps the values of two plain variable nodes; returns the second's
// post-swap value.
template <typename T>
class swap_node : public expression_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef variable_node<T>*   variable_node_ptr;

   swap_node(variable_node_ptr var0, variable_node_ptr var1)
   : var0_(var0),
     var1_(var1)
   {}

   inline T value() const
   {
      std::swap(var0_->ref(),var1_->ref());
      return var1_->ref();
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_swap;
   }

private:

   variable_node_ptr var0_;
   variable_node_ptr var1_;
};

// Swap over any two ivariable-style nodes (e.g. vector elements); the
// operands are down-cast from the generic expression branches.
template <typename T>
class swap_generic_node : public binary_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef ivariable<T>*       ivariable_ptr;

   swap_generic_node(expression_ptr var0, expression_ptr var1)
   : binary_node<T>(details::e_swap, var0, var1),
     var0_(dynamic_cast<ivariable_ptr>(var0)),
     var1_(dynamic_cast<ivariable_ptr>(var1))
   {}

   inline T value() const
   {
      std::swap(var0_->ref(),var1_->ref());
      return var1_->ref();
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_swap;
   }

private:

   ivariable_ptr var0_;
   ivariable_ptr var1_;
};

// Element-wise swap of two vectors over the smaller of the two sizes.
template <typename T>
class swap_vecvec_node : public binary_node     <T>,
                         public vector_interface<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef vector_node<T>*     vector_node_ptr;
   typedef vec_data_store<T>   vds_t;

   swap_vecvec_node(expression_ptr branch0,
                    expression_ptr branch1)
   : binary_node<T>(details::e_swap, branch0, branch1),
     vec0_node_ptr_(0),
     vec1_node_ptr_(0),
     vec_size_     (0),
     initialised_  (false)
   {
      if (is_ivector_node(binary_node<T>::branch_[0].first))
      {
         vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0);

         if (0 != (vi =
         dynamic_cast<vector_interface<T>*>(binary_node<T>::branch_[0].first)))
         {
            vec0_node_ptr_ = vi->vec();
            vds()          = vi->vds();
         }
      }

      if (is_ivector_node(binary_node<T>::branch_[1].first))
      {
         vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0);

         if (0 != (vi = dynamic_cast<vector_interface<T>*>(binary_node<T>::branch_[1].first)))
         {
            vec1_node_ptr_ = vi->vec();
         }
      }

      if (vec0_node_ptr_ && vec1_node_ptr_)
      {
         // Swap only over the overlap of the two vectors.
         vec_size_ = std::min(vec0_node_ptr_->vds().size(),
                              vec1_node_ptr_->vds().size());

         initialised_ = true;
      }
   }

   inline T value() const
   {
      if (initialised_)
      {
         // Evaluate both branches first so dependent data is current.
         binary_node<T>::branch_[0].first->value();
         binary_node<T>::branch_[1].first->value();

         T* vec0 = vec0_node_ptr_->vds().data();
         T* vec1 = vec1_node_ptr_->vds().data();

         for (std::size_t i = 0; i < vec_size_; ++i)
         {
            std::swap(vec0[i],vec1[i]);
         }

         return vec1_node_ptr_->value();
      }
      else
         return std::numeric_limits<T>::quiet_NaN();
   }

   vector_node_ptr vec() const
   {
      return vec0_node_ptr_;
   }

   vector_node_ptr vec()
   {
      return vec0_node_ptr_;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_vecvecswap;
   }

   std::size_t size() const
   {
      return vec_size_;
   }

   vds_t& vds()
   {
      return vds_;
   }

   const vds_t& vds() const
   {
      return vds_;
   }

private:

   vector_node<T>* vec0_node_ptr_;
   vector_node<T>* vec1_node_ptr_;
   std::size_t     vec_size_;
   bool            initialised_;
   vds_t           vds_;
};

#ifndef exprtk_disable_string_capabilities
// Node bound to an external std::string variable. The range rp_ tracks
// [0, size - 1] and its upper bound is refreshed on every value() call
// since the string may be resized externally.
template <typename T>
class stringvar_node : public expression_node <T>,
                       public string_base_node<T>,
                       public range_interface <T>
{
public:

   typedef range_pack<T> range_t;

   static std::string null_value;

   explicit stringvar_node()
   : value_(&null_value)
   {}

   explicit stringvar_node(std::string& v)
   : value_(&v)
   {
      rp_.n0_c = std::make_pair<bool,std::size_t>(true,0);
      rp_.n1_c = std::make_pair<bool,std::size_t>(true,v.size() - 1);
      rp_.cache.first  = rp_.n0_c.second;
      rp_.cache.second = rp_.n1_c.second;
   }

   inline bool operator <(const stringvar_node<T>& v) const
   {
      return this < (&v);
   }

   inline T value() const
   {
      // Refresh the cached upper bound - the bound string may have
      // been resized since the last evaluation.
      rp_.n1_c.second  = (*value_).size() - 1;
      rp_.cache.second = rp_.n1_c.second;

      return std::numeric_limits<T>::quiet_NaN();
   }

   std::string str() const
   {
      return ref();
   }

   char_cptr base() const
   {
      return &(*value_)[0];
   }

   std::size_t size() const
   {
      return ref().size();
   }

   std::string& ref()
   {
      return (*value_);
   }

   const std::string& ref() const
   {
      return (*value_);
   }

   range_t& range_ref()
   {
      return rp_;
   }

   const range_t& range_ref() const
   {
      return rp_;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_stringvar;
   }

private:

   std::string* value_;
   mutable range_t rp_;
};

template <typename T>
std::string stringvar_node<T>::null_value = std::string("");

// A string variable viewed through an explicit (possibly dynamic)
// range; the range's bound expressions are freed in the destructor.
template <typename T>
class string_range_node : public expression_node <T>,
                          public string_base_node<T>,
                          public range_interface <T>
{
public:

   typedef range_pack<T> range_t;

   static std::string null_value;

   explicit string_range_node(std::string& v, const range_t& rp)
   : value_(&v),
     rp_(rp)
   {}

   virtual ~string_range_node()
   {
      rp_.free();
   }

   inline bool operator <(const string_range_node<T>& v) const
   {
      return this < (&v);
   }

   inline T value() const
   {
      return std::numeric_limits<T>::quiet_NaN();
   }

   inline std::string str() const
   {
      return (*value_);
   }

   char_cptr base() const
   {
      return &(*value_)[0];
   }

   std::size_t size() const
   {
      return ref().size();
   }

   inline range_t range() const
   {
      return rp_;
   }

   inline virtual std::string& ref()
   {
      return (*value_);
   }

   inline virtual const std::string& ref() const
   {
      return (*value_);
   }

   inline range_t& range_ref()
   {
      return rp_;
   }

   inline const range_t& range_ref() const
   {
      return rp_;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_stringvarrng;
   }

private:

   std::string* value_;
   range_t      rp_;
};

template <typename T>
std::string string_range_node<T>::null_value = std::string("");

// Immutable string literal viewed through a range.
template <typename T>
class const_string_range_node : public expression_node <T>,
                                public string_base_node<T>,
                                public range_interface <T>
{
public:

   typedef
   range_pack<T> range_t;

   explicit const_string_range_node(const std::string& v, const range_t& rp)
   : value_(v),
     rp_(rp)
   {}

   ~const_string_range_node()
   {
      rp_.free();
   }

   inline T value() const
   {
      return std::numeric_limits<T>::quiet_NaN();
   }

   std::string str() const
   {
      return value_;
   }

   char_cptr base() const
   {
      return value_.data();
   }

   std::size_t size() const
   {
      return value_.size();
   }

   range_t range() const
   {
      return rp_;
   }

   range_t& range_ref()
   {
      return rp_;
   }

   const range_t& range_ref() const
   {
      return rp_;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_cstringvarrng;
   }

private:

   const_string_range_node<T>& operator=(const const_string_range_node<T>&);

   const std::string value_;
   range_t rp_;
};

// Applies an additional range (base_range_) on top of a generally
// string-valued branch that itself exposes a range; the composed
// sub-string is materialised into value_ on evaluation.
template <typename T>
class generic_string_range_node : public expression_node <T>,
                                  public string_base_node<T>,
                                  public range_interface <T>
{
public:

   typedef expression_node <T>* expression_ptr;
   typedef stringvar_node  <T>* strvar_node_ptr;
   typedef string_base_node<T>* str_base_ptr;
   typedef range_pack      <T>  range_t;
   typedef range_t*             range_ptr;
   typedef range_interface<T>   irange_t;
   typedef irange_t*            irange_ptr;

   generic_string_range_node(expression_ptr str_branch, const range_t& brange)
   : initialised_(false),
     branch_(str_branch),
     branch_deletable_(branch_deletable(branch_)),
     str_base_ptr_ (0),
     str_range_ptr_(0),
     base_range_(brange)
   {
      range_.n0_c = std::make_pair<bool,std::size_t>(true,0);
      range_.n1_c = std::make_pair<bool,std::size_t>(true,0);
      range_.cache.first  = range_.n0_c.second;
      range_.cache.second = range_.n1_c.second;

      // Both views of the branch are required; failing either cast
      // leaves the node uninitialised (value() then only yields NaN).
      if (is_generally_string_node(branch_))
      {
         str_base_ptr_ = dynamic_cast<str_base_ptr>(branch_);

         if (0 == str_base_ptr_)
            return;

         str_range_ptr_ = dynamic_cast<irange_ptr>(branch_);

         if (0 == str_range_ptr_)
            return;
      }

      initialised_ = (str_base_ptr_ && str_range_ptr_);
   }

   ~generic_string_range_node()
   {
      base_range_.free();

      if (branch_ && branch_deletable_)
      {
         destroy_node(branch_);
      }
   }

   inline T value() const
   {
      if (initialised_)
      {
         branch_->value();

         std::size_t str_r0 = 0;
         std::size_t str_r1 = 0;

         std::size_t r0 = 0;
         std::size_t r1 = 0;

         range_t& range = str_range_ptr_->range_ref();

         const std::size_t base_str_size = str_base_ptr_->size();

         if (
              range      (str_r0,str_r1,base_str_size) &&
              base_range_(    r0,    r1,base_str_size)
            )
         {
            const std::size_t size = (r1 - r0) + 1;

            range_.n1_c.second  = size - 1;
            range_.cache.second = range_.n1_c.second;

            // Compose the two ranges: offset into the branch's own
            // sub-range by the base range's lower bound.
            value_.assign(str_base_ptr_->base() + str_r0 + r0, size);
         }
      }

      return std::numeric_limits<T>::quiet_NaN();
   }

   std::string str() const
   {
      return value_;
   }

   char_cptr base() const
   {
      return &value_[0];
   }

   std::size_t size() const
   {
      return value_.size();
   }

   range_t& range_ref()
   {
      return range_;
   }

   const range_t& range_ref() const
   {
      return range_;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_strgenrange;
   }

private:

   bool                initialised_;
   expression_ptr      branch_;
   const bool          branch_deletable_;
   str_base_ptr        str_base_ptr_;
   irange_ptr          str_range_ptr_;
   mutable range_t     base_range_;
   mutable range_t     range_;
   mutable std::string value_;
};

// Concatenation of two (ranged) string expressions; the result is
// materialised into value_ and exposed via the string/range interfaces.
template <typename T>
class string_concat_node : public binary_node     <T>,
                           public string_base_node<T>,
                           public range_interface <T>
{
public:

   typedef expression_node <T>* expression_ptr;
   typedef string_base_node<T>* str_base_ptr;
   typedef range_pack      <T>  range_t;
   typedef range_t*             range_ptr;
   typedef range_interface<T>   irange_t;
   typedef irange_t*            irange_ptr;

   string_concat_node(const operator_type& opr,
                      expression_ptr branch0,
                      expression_ptr branch1)
   : binary_node<T>(opr, branch0, branch1),
     initialised_(false),
     str0_base_ptr_ (0),
     str1_base_ptr_ (0),
     str0_range_ptr_(0),
     str1_range_ptr_(0)
   {
      range_.n0_c = std::make_pair<bool,std::size_t>(true,0);
      range_.n1_c = std::make_pair<bool,std::size_t>(true,0);

      range_.cache.first  = range_.n0_c.second;
      range_.cache.second = range_.n1_c.second;

      if (is_generally_string_node(binary_node<T>::branch_[0].first))
      {
         str0_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[0].first);
         if (0 == str0_base_ptr_)
            return;

         str0_range_ptr_ = dynamic_cast<irange_ptr>(binary_node<T>::branch_[0].first);

         if (0 == str0_range_ptr_)
            return;
      }

      if (is_generally_string_node(binary_node<T>::branch_[1].first))
      {
         str1_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[1].first);

         if (0 == str1_base_ptr_)
            return;

         str1_range_ptr_ = dynamic_cast<irange_ptr>(binary_node<T>::branch_[1].first);

         if (0 == str1_range_ptr_)
            return;
      }

      initialised_ = str0_base_ptr_  &&
                     str1_base_ptr_  &&
                     str0_range_ptr_ &&
                     str1_range_ptr_ ;
   }

   inline T value() const
   {
      if (initialised_)
      {
         // Evaluate operands first so their ranges/buffers are current.
         binary_node<T>::branch_[0].first->value();
         binary_node<T>::branch_[1].first->value();

         std::size_t str0_r0 = 0;
         std::size_t str0_r1 = 0;

         std::size_t str1_r0 = 0;
         std::size_t str1_r1 = 0;

         range_t& range0 = str0_range_ptr_->range_ref();
         range_t& range1 = str1_range_ptr_->range_ref();

         if (
              range0(str0_r0,str0_r1,str0_base_ptr_->size()) &&
              range1(str1_r0,str1_r1,str1_base_ptr_->size())
            )
         {
            const std::size_t size0 = (str0_r1 - str0_r0) + 1;
            const std::size_t size1 = (str1_r1 - str1_r0) + 1;

            value_.assign(str0_base_ptr_->base() + str0_r0, size0);
            value_.append(str1_base_ptr_->base() + str1_r0, size1);

            range_.n1_c.second  = value_.size() - 1;
            range_.cache.second = range_.n1_c.second;
         }
      }

      return std::numeric_limits<T>::quiet_NaN();
   }

   std::string str() const
   {
      return value_;
   }

   char_cptr base() const
   {
      return &value_[0];
   }

   std::size_t size() const
   {
      return value_.size();
   }

   range_t& range_ref()
   {
      return range_;
   }

   const range_t& range_ref() const
   {
      return range_;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_strconcat;
   }

private:

   bool initialised_;
   str_base_ptr str0_base_ptr_;
   str_base_ptr str1_base_ptr_;
   irange_ptr   str0_range_ptr_;
   irange_ptr   str1_range_ptr_;
   mutable range_t     range_;
   mutable std::string value_;
};

// Swaps the contents of two string variable nodes.
template <typename T>
class swap_string_node : public binary_node     <T>,
                         public string_base_node<T>,
                         public range_interface <T>
{
public:

   typedef expression_node
   <T>* expression_ptr;
   typedef stringvar_node  <T>* strvar_node_ptr;
   typedef string_base_node<T>* str_base_ptr;
   typedef range_pack      <T>  range_t;
   typedef range_t*             range_ptr;
   typedef range_interface<T>   irange_t;
   typedef irange_t*            irange_ptr;

   swap_string_node(expression_ptr branch0, expression_ptr branch1)
   : binary_node<T>(details::e_swap, branch0, branch1),
     initialised_(false),
     str0_node_ptr_(0),
     str1_node_ptr_(0)
   {
      // Both operands must be plain string variable nodes.
      if (is_string_node(binary_node<T>::branch_[0].first))
      {
         str0_node_ptr_ = static_cast<strvar_node_ptr>(binary_node<T>::branch_[0].first);
      }

      if (is_string_node(binary_node<T>::branch_[1].first))
      {
         str1_node_ptr_ = static_cast<strvar_node_ptr>(binary_node<T>::branch_[1].first);
      }

      initialised_ = (str0_node_ptr_ && str1_node_ptr_);
   }

   inline T value() const
   {
      if (initialised_)
      {
         binary_node<T>::branch_[0].first->value();
         binary_node<T>::branch_[1].first->value();

         std::swap(str0_node_ptr_->ref(),str1_node_ptr_->ref());
      }

      return std::numeric_limits<T>::quiet_NaN();
   }

   std::string str() const
   {
      return str0_node_ptr_->str();
   }

   char_cptr base() const
   {
      return str0_node_ptr_->base();
   }

   std::size_t size() const
   {
      return str0_node_ptr_->size();
   }

   range_t& range_ref()
   {
      return str0_node_ptr_->range_ref();
   }

   const range_t& range_ref() const
   {
      return str0_node_ptr_->range_ref();
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_strswap;
   }

private:

   bool initialised_;
   strvar_node_ptr str0_node_ptr_;
   strvar_node_ptr str1_node_ptr_;
};

// In-place, character-wise swap between two generic (ranged) string
// expressions, using an unrolled loop over the overlapping extent.
template <typename T>
class swap_genstrings_node : public binary_node<T>
{
public:

   typedef expression_node <T>* expression_ptr;
   typedef string_base_node<T>* str_base_ptr;
   typedef range_pack      <T>  range_t;
   typedef range_t*             range_ptr;
   typedef range_interface<T>   irange_t;
   typedef irange_t*            irange_ptr;

   swap_genstrings_node(expression_ptr branch0,
                        expression_ptr branch1)
   : binary_node<T>(details::e_default, branch0, branch1),
     str0_base_ptr_ (0),
     str1_base_ptr_ (0),
     str0_range_ptr_(0),
     str1_range_ptr_(0),
     initialised_(false)
   {
      if (is_generally_string_node(binary_node<T>::branch_[0].first))
      {
         str0_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[0].first);

         if (0 == str0_base_ptr_)
            return;

         irange_ptr range = dynamic_cast<irange_ptr>(binary_node<T>::branch_[0].first);

         if (0 == range)
            return;

         str0_range_ptr_ = &(range->range_ref());
      }

      if (is_generally_string_node(binary_node<T>::branch_[1].first))
      {
         str1_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[1].first);

         if (0 == str1_base_ptr_)
            return;

         irange_ptr range = dynamic_cast<irange_ptr>(binary_node<T>::branch_[1].first);

         if (0 == range)
            return;

         str1_range_ptr_ = &(range->range_ref());
      }

      initialised_ = str0_base_ptr_  &&
                     str1_base_ptr_  &&
                     str0_range_ptr_ &&
                     str1_range_ptr_ ;
   }

   inline T value() const
   {
      if (initialised_)
      {
         binary_node<T>::branch_[0].first->value();
         binary_node<T>::branch_[1].first->value();

         std::size_t str0_r0 = 0;
         std::size_t str0_r1 = 0;

         std::size_t str1_r0 = 0;
         std::size_t str1_r1 = 0;

         range_t& range0 = (*str0_range_ptr_);
         range_t& range1 = (*str1_range_ptr_);

         if (
              range0(str0_r0,str0_r1,str0_base_ptr_->size()) &&
              range1(str1_r0,str1_r1,str1_base_ptr_->size())
            )
         {
            const std::size_t size0    = range0.cache_size();
            const std::size_t size1    = range1.cache_size();
            const std::size_t max_size = std::min(size0,size1);

            // The underlying buffers are mutated in place, hence the
            // const_cast from the (const) base pointers.
            char_ptr s0 = const_cast<char_ptr>(str0_base_ptr_->base() + str0_r0);
            char_ptr s1 = const_cast<char_ptr>(str1_base_ptr_->base() + str1_r0);

            // Manually unrolled swap loop (4-wide, or 16-wide when
            // superscalar unrolling is enabled).
            loop_unroll::details lud(max_size);
            char_cptr upper_bound = s0 + lud.upper_bound;

            while (s0 < upper_bound)
            {
               #define exprtk_loop(N)   \
               std::swap(s0[N], s1[N]); \

               exprtk_loop( 0) exprtk_loop( 1)
               exprtk_loop( 2) exprtk_loop( 3)
               #ifndef exprtk_disable_superscalar_unroll
               exprtk_loop( 4) exprtk_loop( 5)
               exprtk_loop( 6) exprtk_loop( 7)
               exprtk_loop( 8) exprtk_loop( 9)
               exprtk_loop(10) exprtk_loop(11)
               exprtk_loop(12) exprtk_loop(13)
               exprtk_loop(14) exprtk_loop(15)
               #endif

               s0 += lud.batch_size;
               s1 += lud.batch_size;
            }

            int i = 0;

            // Remainder handled via a deliberately falling-through
            // switch; fallthrough warnings are suppressed around it.
            exprtk_disable_fallthrough_begin
            switch (lud.remainder)
            {
               #define case_stmt(N)                      \
               case N : { std::swap(s0[i],s1[i]); ++i; } \

               #ifndef exprtk_disable_superscalar_unroll
               case_stmt(15) case_stmt(14)
               case_stmt(13) case_stmt(12)
               case_stmt(11) case_stmt(10)
               case_stmt( 9) case_stmt( 8)
               case_stmt( 7) case_stmt( 6)
               case_stmt( 5) case_stmt( 4)
               #endif
               case_stmt( 3) case_stmt( 2)
               case_stmt( 1)
            }
            exprtk_disable_fallthrough_end

            #undef exprtk_loop
            #undef case_stmt
         }
      }

      return std::numeric_limits<T>::quiet_NaN();
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_strswap;
   }

private:

   swap_genstrings_node(swap_genstrings_node<T>&);
   swap_genstrings_node<T>& operator=(swap_genstrings_node<T>&);

   str_base_ptr str0_base_ptr_;
   str_base_ptr str1_base_ptr_;
   range_ptr    str0_range_ptr_;
   range_ptr    str1_range_ptr_;
   bool         initialised_;
};

// Size of a bound string variable (default-bound to the shared empty
// null_value string).
template <typename T>
class stringvar_size_node : public expression_node<T>
{
public:

   static std::string null_value;

   explicit stringvar_size_node()
   : value_(&null_value)
   {}

   explicit stringvar_size_node(std::string& v)
   : value_(&v)
   {}

   inline T value() const
   {
      return T((*value_).size());
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_stringvarsize;
   }

private:

   std::string* value_;
};

template <typename T>
std::string stringvar_size_node<T>::null_value = std::string("");

// Size of an arbitrary string-valued expression; the branch is
// evaluated first so its materialised value is up to date.
template <typename T>
class string_size_node : public expression_node<T>
{
public:

   typedef expression_node <T>* expression_ptr;
   typedef string_base_node<T>* str_base_ptr;

   string_size_node(expression_ptr brnch)
   : branch_(brnch),
     branch_deletable_(branch_deletable(branch_)),
     str_base_ptr_(0)
   {
      if (is_generally_string_node(branch_))
      {
         str_base_ptr_ = dynamic_cast<str_base_ptr>(branch_);

         if (0 == str_base_ptr_)
            return;
      }
   }

   ~string_size_node()
   {
      if (branch_ && branch_deletable_)
      {
         destroy_node(branch_);
      }
   }

   inline T value() const
   {
      T result = std::numeric_limits<T>::quiet_NaN();

      if (str_base_ptr_)
      {
         branch_->value();
         result = T(str_base_ptr_->size());
      }

      return result;
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_stringsize;
   }

private:

   expression_ptr branch_;
   const bool     branch_deletable_;
   str_base_ptr   str_base_ptr_;
};

// Assignment policies used by the string assignment nodes below:
// plain assignment vs append (for compound '+=' style assignment).
struct asn_assignment
{
   static inline void execute(std::string& s, char_cptr data, const std::size_t size)
   {
      s.assign(data,size);
   }
};

struct asn_addassignment
{
   static inline void execute(std::string& s, char_cptr data, const std::size_t size)
   {
      s.append(data,size);
   }
};

// Assigns (or appends, per AssignmentProcess) the ranged value of the
// rhs string expression to the lhs string variable.
template <typename T, typename AssignmentProcess = asn_assignment>
class assignment_string_node : public binary_node     <T>,
                               public string_base_node<T>,
                               public range_interface <T>
{
public:

   typedef expression_node <T>* expression_ptr;
   typedef stringvar_node  <T>* strvar_node_ptr;
   typedef string_base_node<T>* str_base_ptr;
   typedef range_pack      <T>  range_t;
   typedef range_t*             range_ptr;
   typedef range_interface<T>   irange_t;
   typedef irange_t*            irange_ptr;

   assignment_string_node(const operator_type& opr,
                          expression_ptr branch0,
                          expression_ptr branch1)
   : binary_node<T>(opr, branch0, branch1),
     initialised_(false),
     str0_base_ptr_ (0),
     str1_base_ptr_ (0),
     str0_node_ptr_ (0),
     str1_range_ptr_(0)
   {
      if (is_string_node(binary_node<T>::branch_[0].first))
      {
         str0_node_ptr_ = static_cast<strvar_node_ptr>(binary_node<T>::branch_[0].first);

         str0_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[0].first);
      }

      if (is_generally_string_node(binary_node<T>::branch_[1].first))
      {
         str1_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[1].first);

         if (0 == str1_base_ptr_)
            return;

         irange_ptr range = dynamic_cast<irange_ptr>(binary_node<T>::branch_[1].first);

         if (0 == range)
            return;

         str1_range_ptr_ = &(range->range_ref());
      }

      initialised_ = str0_base_ptr_  &&
                     str1_base_ptr_  &&
                     str0_node_ptr_  &&
                     str1_range_ptr_ ;
   }

   inline T value() const
   {
      if (initialised_)
      {
         // Evaluate rhs first; lhs is evaluated after the assignment.
         binary_node<T>::branch_[1].first->value();

         std::size_t r0 = 0;
         std::size_t r1 = 0;

         range_t& range =
         (*str1_range_ptr_);

         if (range(r0, r1, str1_base_ptr_->size()))
         {
            AssignmentProcess::execute(str0_node_ptr_->ref(),
                                       str1_base_ptr_->base() + r0,
                                       (r1 - r0) + 1);

            binary_node<T>::branch_[0].first->value();
         }
      }

      return std::numeric_limits<T>::quiet_NaN();
   }

   std::string str() const
   {
      return str0_node_ptr_->str();
   }

   char_cptr base() const
   {
      return str0_node_ptr_->base();
   }

   std::size_t size() const
   {
      return str0_node_ptr_->size();
   }

   range_t& range_ref()
   {
      return str0_node_ptr_->range_ref();
   }

   const range_t& range_ref() const
   {
      return str0_node_ptr_->range_ref();
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_strass;
   }

private:

   bool initialised_;
   str_base_ptr str0_base_ptr_;
   str_base_ptr str1_base_ptr_;
   strvar_node_ptr str0_node_ptr_;
   range_ptr str1_range_ptr_;
};

// Assignment into a ranged section of the lhs string; copies
// min(lhs-range, rhs-range) characters in place.
template <typename T, typename AssignmentProcess = asn_assignment>
class assignment_string_range_node : public binary_node     <T>,
                                     public string_base_node<T>,
                                     public range_interface <T>
{
public:

   typedef expression_node <T>* expression_ptr;
   typedef stringvar_node  <T>* strvar_node_ptr;
   typedef string_base_node<T>* str_base_ptr;
   typedef range_pack      <T>  range_t;
   typedef range_t*             range_ptr;
   typedef range_interface<T>   irange_t;
   typedef irange_t*            irange_ptr;

   assignment_string_range_node(const operator_type& opr,
                                expression_ptr branch0,
                                expression_ptr branch1)
   : binary_node<T>(opr, branch0, branch1),
     initialised_(false),
     str0_base_ptr_ (0),
     str1_base_ptr_ (0),
     str0_node_ptr_ (0),
     str0_range_ptr_(0),
     str1_range_ptr_(0)
   {
      if (is_string_range_node(binary_node<T>::branch_[0].first))
      {
         str0_node_ptr_ = static_cast<strvar_node_ptr>(binary_node<T>::branch_[0].first);

         str0_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[0].first);

         irange_ptr range = dynamic_cast<irange_ptr>(binary_node<T>::branch_[0].first);

         if (0 == range)
            return;

         str0_range_ptr_ = &(range->range_ref());
      }

      if (is_generally_string_node(binary_node<T>::branch_[1].first))
      {
         str1_base_ptr_ =
         dynamic_cast<str_base_ptr>(binary_node<T>::branch_[1].first);

         if (0 == str1_base_ptr_)
            return;

         irange_ptr range = dynamic_cast<irange_ptr>(binary_node<T>::branch_[1].first);

         if (0 == range)
            return;

         str1_range_ptr_ = &(range->range_ref());
      }

      initialised_ = str0_base_ptr_  &&
                     str1_base_ptr_  &&
                     str0_node_ptr_  &&
                     str0_range_ptr_ &&
                     str1_range_ptr_ ;
   }

   inline T value() const
   {
      if (initialised_)
      {
         binary_node<T>::branch_[0].first->value();
         binary_node<T>::branch_[1].first->value();

         std::size_t s0_r0 = 0;
         std::size_t s0_r1 = 0;

         std::size_t s1_r0 = 0;
         std::size_t s1_r1 = 0;

         range_t& range0 = (*str0_range_ptr_);
         range_t& range1 = (*str1_range_ptr_);

         if (
              range0(s0_r0, s0_r1, str0_base_ptr_->size()) &&
              range1(s1_r0, s1_r1, str1_base_ptr_->size())
            )
         {
            // Copy only as many characters as both ranges cover.
            std::size_t size = std::min((s0_r1 - s0_r0),(s1_r1 - s1_r0)) + 1;

            std::copy(str1_base_ptr_->base() + s1_r0,
                      str1_base_ptr_->base() + s1_r0 + size,
                      const_cast<char_ptr>(base() + s0_r0));
         }
      }

      return std::numeric_limits<T>::quiet_NaN();
   }

   std::string str() const
   {
      return str0_node_ptr_->str();
   }

   char_cptr base() const
   {
      return str0_node_ptr_->base();
   }

   std::size_t size() const
   {
      return str0_node_ptr_->size();
   }

   range_t& range_ref()
   {
      return str0_node_ptr_->range_ref();
   }

   const range_t& range_ref() const
   {
      return str0_node_ptr_->range_ref();
   }

   inline typename expression_node<T>::node_type type() const
   {
      return expression_node<T>::e_strass;
   }

private:

   bool initialised_;
   str_base_ptr str0_base_ptr_;
   str_base_ptr str1_base_ptr_;
   strvar_node_ptr str0_node_ptr_;
   range_ptr str0_range_ptr_;
   range_ptr str1_range_ptr_;
};

// Ternary (test ? consequent : alternative) over string expressions;
// the selected branch's ranged value is materialised into value_ and
// value() reports 1/0 for which branch was taken (NaN on failure).
template <typename T>
class conditional_string_node : public trinary_node    <T>,
                                public string_base_node<T>,
                                public range_interface <T>
{
public:

   typedef expression_node <T>* expression_ptr;
   typedef string_base_node<T>* str_base_ptr;
   typedef range_pack      <T>  range_t;
   typedef range_t*             range_ptr;
   typedef range_interface<T>   irange_t;
   typedef irange_t*            irange_ptr;

   conditional_string_node(expression_ptr test, expression_ptr
consequent, expression_ptr alternative) : trinary_node<T>(details::e_default,consequent,alternative,test), initialised_(false), str0_base_ptr_ (0), str1_base_ptr_ (0), str0_range_ptr_(0), str1_range_ptr_(0), test_ (test), consequent_ (consequent), alternative_(alternative) { range_.n0_c = std::make_pair<bool,std::size_t>(true,0); range_.n1_c = std::make_pair<bool,std::size_t>(true,0); range_.cache.first = range_.n0_c.second; range_.cache.second = range_.n1_c.second; if (is_generally_string_node(trinary_node<T>::branch_[0].first)) { str0_base_ptr_ = dynamic_cast<str_base_ptr>(trinary_node<T>::branch_[0].first); if (0 == str0_base_ptr_) return; str0_range_ptr_ = dynamic_cast<irange_ptr>(trinary_node<T>::branch_[0].first); if (0 == str0_range_ptr_) return; } if (is_generally_string_node(trinary_node<T>::branch_[1].first)) { str1_base_ptr_ = dynamic_cast<str_base_ptr>(trinary_node<T>::branch_[1].first); if (0 == str1_base_ptr_) return; str1_range_ptr_ = dynamic_cast<irange_ptr>(trinary_node<T>::branch_[1].first); if (0 == str1_range_ptr_) return; } initialised_ = str0_base_ptr_ && str1_base_ptr_ && str0_range_ptr_ && str1_range_ptr_ ; } inline T value() const { if (initialised_) { std::size_t r0 = 0; std::size_t r1 = 0; if (is_true(test_)) { consequent_->value(); range_t& range = str0_range_ptr_->range_ref(); if (range(r0, r1, str0_base_ptr_->size())) { const std::size_t size = (r1 - r0) + 1; value_.assign(str0_base_ptr_->base() + r0, size); range_.n1_c.second = value_.size() - 1; range_.cache.second = range_.n1_c.second; return T(1); } } else { alternative_->value(); range_t& range = str1_range_ptr_->range_ref(); if (range(r0, r1, str1_base_ptr_->size())) { const std::size_t size = (r1 - r0) + 1; value_.assign(str1_base_ptr_->base() + r0, size); range_.n1_c.second = value_.size() - 1; range_.cache.second = range_.n1_c.second; return T(0); } } } return std::numeric_limits<T>::quiet_NaN(); } std::string str() const { return value_; } char_cptr base() const { return 
&value_[0]; } std::size_t size() const { return value_.size(); } range_t& range_ref() { return range_; } const range_t& range_ref() const { return range_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_strcondition; } private: bool initialised_; str_base_ptr str0_base_ptr_; str_base_ptr str1_base_ptr_; irange_ptr str0_range_ptr_; irange_ptr str1_range_ptr_; mutable range_t range_; mutable std::string value_; expression_ptr test_; expression_ptr consequent_; expression_ptr alternative_; }; template <typename T> class cons_conditional_str_node : public binary_node <T>, public string_base_node<T>, public range_interface <T> { public: typedef expression_node <T>* expression_ptr; typedef string_base_node<T>* str_base_ptr; typedef range_pack <T> range_t; typedef range_t* range_ptr; typedef range_interface<T> irange_t; typedef irange_t* irange_ptr; cons_conditional_str_node(expression_ptr test, expression_ptr consequent) : binary_node<T>(details::e_default, consequent, test), initialised_(false), str0_base_ptr_ (0), str0_range_ptr_(0), test_ (test), consequent_(consequent) { range_.n0_c = std::make_pair<bool,std::size_t>(true,0); range_.n1_c = std::make_pair<bool,std::size_t>(true,0); range_.cache.first = range_.n0_c.second; range_.cache.second = range_.n1_c.second; if (is_generally_string_node(binary_node<T>::branch_[0].first)) { str0_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[0].first); if (0 == str0_base_ptr_) return; str0_range_ptr_ = dynamic_cast<irange_ptr>(binary_node<T>::branch_[0].first); if (0 == str0_range_ptr_) return; } initialised_ = str0_base_ptr_ && str0_range_ptr_ ; } inline T value() const { if (initialised_) { if (is_true(test_)) { consequent_->value(); range_t& range = str0_range_ptr_->range_ref(); std::size_t r0 = 0; std::size_t r1 = 0; if (range(r0, r1, str0_base_ptr_->size())) { const std::size_t size = (r1 - r0) + 1; value_.assign(str0_base_ptr_->base() + r0, size); range_.n1_c.second 
= value_.size() - 1; range_.cache.second = range_.n1_c.second; return T(1); } } } return std::numeric_limits<T>::quiet_NaN(); } std::string str() const { return value_; } char_cptr base() const { return &value_[0]; } std::size_t size() const { return value_.size(); } range_t& range_ref() { return range_; } const range_t& range_ref() const { return range_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_strccondition; } private: bool initialised_; str_base_ptr str0_base_ptr_; irange_ptr str0_range_ptr_; mutable range_t range_; mutable std::string value_; expression_ptr test_; expression_ptr consequent_; }; template <typename T, typename VarArgFunction> class str_vararg_node : public expression_node <T>, public string_base_node<T>, public range_interface <T> { public: typedef expression_node <T>* expression_ptr; typedef string_base_node<T>* str_base_ptr; typedef range_pack <T> range_t; typedef range_t* range_ptr; typedef range_interface<T> irange_t; typedef irange_t* irange_ptr; template <typename Allocator, template <typename,typename> class Sequence> str_vararg_node(const Sequence<expression_ptr,Allocator>& arg_list) : final_node_(arg_list.back()), final_deletable_(branch_deletable(final_node_)), initialised_(false), str_base_ptr_ (0), str_range_ptr_(0) { if (0 == final_node_) return; else if (!is_generally_string_node(final_node_)) return; str_base_ptr_ = dynamic_cast<str_base_ptr>(final_node_); if (0 == str_base_ptr_) return; str_range_ptr_ = dynamic_cast<irange_ptr>(final_node_); if (0 == str_range_ptr_) return; initialised_ = str_base_ptr_ && str_range_ptr_; if (arg_list.size() > 1) { const std::size_t arg_list_size = arg_list.size() - 1; arg_list_.resize(arg_list_size); delete_branch_.resize(arg_list_size); for (std::size_t i = 0; i < arg_list_size; ++i) { if (arg_list[i]) { arg_list_[i] = arg_list[i]; delete_branch_[i] = static_cast<unsigned char>(branch_deletable(arg_list_[i]) ? 
1 : 0); } else { arg_list_ .clear(); delete_branch_.clear(); return; } } } } ~str_vararg_node() { if (final_node_ && final_deletable_) { destroy_node(final_node_); } for (std::size_t i = 0; i < arg_list_.size(); ++i) { if (arg_list_[i] && delete_branch_[i]) { destroy_node(arg_list_[i]); } } } inline T value() const { if (!arg_list_.empty()) { VarArgFunction::process(arg_list_); } final_node_->value(); return std::numeric_limits<T>::quiet_NaN(); } std::string str() const { return str_base_ptr_->str(); } char_cptr base() const { return str_base_ptr_->base(); } std::size_t size() const { return str_base_ptr_->size(); } range_t& range_ref() { return str_range_ptr_->range_ref(); } const range_t& range_ref() const { return str_range_ptr_->range_ref(); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_stringvararg; } private: expression_ptr final_node_; bool final_deletable_; bool initialised_; str_base_ptr str_base_ptr_; irange_ptr str_range_ptr_; std::vector<expression_ptr> arg_list_; std::vector<unsigned char> delete_branch_; }; #endif template <typename T, std::size_t N> inline T axn(T a, T x) { // a*x^n return a * exprtk::details::numeric::fast_exp<T,N>::result(x); } template <typename T, std::size_t N> inline T axnb(T a, T x, T b) { // a*x^n+b return a * exprtk::details::numeric::fast_exp<T,N>::result(x) + b; } template <typename T> struct sf_base { typedef typename details::functor_t<T>::Type Type; typedef typename details::functor_t<T> functor_t; typedef typename functor_t::qfunc_t quaternary_functor_t; typedef typename functor_t::tfunc_t trinary_functor_t; typedef typename functor_t::bfunc_t binary_functor_t; typedef typename functor_t::ufunc_t unary_functor_t; }; #define define_sfop3(NN,OP0,OP1) \ template <typename T> \ struct sf##NN##_op : public sf_base<T> \ { \ typedef typename sf_base<T>::Type Type; \ static inline T process(Type x, Type y, Type z) \ { \ return (OP0); \ } \ static inline std::string id() \ { \ 
return OP1; \ } \ }; \ define_sfop3(00,(x + y) / z ,"(t+t)/t") define_sfop3(01,(x + y) * z ,"(t+t)*t") define_sfop3(02,(x + y) - z ,"(t+t)-t") define_sfop3(03,(x + y) + z ,"(t+t)+t") define_sfop3(04,(x - y) + z ,"(t-t)+t") define_sfop3(05,(x - y) / z ,"(t-t)/t") define_sfop3(06,(x - y) * z ,"(t-t)*t") define_sfop3(07,(x * y) + z ,"(t*t)+t") define_sfop3(08,(x * y) - z ,"(t*t)-t") define_sfop3(09,(x * y) / z ,"(t*t)/t") define_sfop3(10,(x * y) * z ,"(t*t)*t") define_sfop3(11,(x / y) + z ,"(t/t)+t") define_sfop3(12,(x / y) - z ,"(t/t)-t") define_sfop3(13,(x / y) / z ,"(t/t)/t") define_sfop3(14,(x / y) * z ,"(t/t)*t") define_sfop3(15,x / (y + z) ,"t/(t+t)") define_sfop3(16,x / (y - z) ,"t/(t-t)") define_sfop3(17,x / (y * z) ,"t/(t*t)") define_sfop3(18,x / (y / z) ,"t/(t/t)") define_sfop3(19,x * (y + z) ,"t*(t+t)") define_sfop3(20,x * (y - z) ,"t*(t-t)") define_sfop3(21,x * (y * z) ,"t*(t*t)") define_sfop3(22,x * (y / z) ,"t*(t/t)") define_sfop3(23,x - (y + z) ,"t-(t+t)") define_sfop3(24,x - (y - z) ,"t-(t-t)") define_sfop3(25,x - (y / z) ,"t-(t/t)") define_sfop3(26,x - (y * z) ,"t-(t*t)") define_sfop3(27,x + (y * z) ,"t+(t*t)") define_sfop3(28,x + (y / z) ,"t+(t/t)") define_sfop3(29,x + (y + z) ,"t+(t+t)") define_sfop3(30,x + (y - z) ,"t+(t-t)") define_sfop3(31,(axnb<T,2>(x,y,z))," ") define_sfop3(32,(axnb<T,3>(x,y,z))," ") define_sfop3(33,(axnb<T,4>(x,y,z))," ") define_sfop3(34,(axnb<T,5>(x,y,z))," ") define_sfop3(35,(axnb<T,6>(x,y,z))," ") define_sfop3(36,(axnb<T,7>(x,y,z))," ") define_sfop3(37,(axnb<T,8>(x,y,z))," ") define_sfop3(38,(axnb<T,9>(x,y,z))," ") define_sfop3(39,x * numeric::log(y) + z,"") define_sfop3(40,x * numeric::log(y) - z,"") define_sfop3(41,x * numeric::log10(y) + z,"") define_sfop3(42,x * numeric::log10(y) - z,"") define_sfop3(43,x * numeric::sin(y) + z ,"") define_sfop3(44,x * numeric::sin(y) - z ,"") define_sfop3(45,x * numeric::cos(y) + z ,"") define_sfop3(46,x * numeric::cos(y) - z ,"") define_sfop3(47,details::is_true(x) ? 
y : z,"") #define define_sfop4(NN,OP0,OP1) \ template <typename T> \ struct sf##NN##_op : public sf_base<T> \ { \ typedef typename sf_base<T>::Type Type; \ static inline T process(Type x, Type y, Type z, Type w) \ { \ return (OP0); \ } \ static inline std::string id() { return OP1; } \ }; \ define_sfop4(48,(x + ((y + z) / w)),"t+((t+t)/t)") define_sfop4(49,(x + ((y + z) * w)),"t+((t+t)*t)") define_sfop4(50,(x + ((y - z) / w)),"t+((t-t)/t)") define_sfop4(51,(x + ((y - z) * w)),"t+((t-t)*t)") define_sfop4(52,(x + ((y * z) / w)),"t+((t*t)/t)") define_sfop4(53,(x + ((y * z) * w)),"t+((t*t)*t)") define_sfop4(54,(x + ((y / z) + w)),"t+((t/t)+t)") define_sfop4(55,(x + ((y / z) / w)),"t+((t/t)/t)") define_sfop4(56,(x + ((y / z) * w)),"t+((t/t)*t)") define_sfop4(57,(x - ((y + z) / w)),"t-((t+t)/t)") define_sfop4(58,(x - ((y + z) * w)),"t-((t+t)*t)") define_sfop4(59,(x - ((y - z) / w)),"t-((t-t)/t)") define_sfop4(60,(x - ((y - z) * w)),"t-((t-t)*t)") define_sfop4(61,(x - ((y * z) / w)),"t-((t*t)/t)") define_sfop4(62,(x - ((y * z) * w)),"t-((t*t)*t)") define_sfop4(63,(x - ((y / z) / w)),"t-((t/t)/t)") define_sfop4(64,(x - ((y / z) * w)),"t-((t/t)*t)") define_sfop4(65,(((x + y) * z) - w),"((t+t)*t)-t") define_sfop4(66,(((x - y) * z) - w),"((t-t)*t)-t") define_sfop4(67,(((x * y) * z) - w),"((t*t)*t)-t") define_sfop4(68,(((x / y) * z) - w),"((t/t)*t)-t") define_sfop4(69,(((x + y) / z) - w),"((t+t)/t)-t") define_sfop4(70,(((x - y) / z) - w),"((t-t)/t)-t") define_sfop4(71,(((x * y) / z) - w),"((t*t)/t)-t") define_sfop4(72,(((x / y) / z) - w),"((t/t)/t)-t") define_sfop4(73,((x * y) + (z * w)),"(t*t)+(t*t)") define_sfop4(74,((x * y) - (z * w)),"(t*t)-(t*t)") define_sfop4(75,((x * y) + (z / w)),"(t*t)+(t/t)") define_sfop4(76,((x * y) - (z / w)),"(t*t)-(t/t)") define_sfop4(77,((x / y) + (z / w)),"(t/t)+(t/t)") define_sfop4(78,((x / y) - (z / w)),"(t/t)-(t/t)") define_sfop4(79,((x / y) - (z * w)),"(t/t)-(t*t)") define_sfop4(80,(x / (y + (z * w))),"t/(t+(t*t))") define_sfop4(81,(x / (y 
- (z * w))),"t/(t-(t*t))") define_sfop4(82,(x * (y + (z * w))),"t*(t+(t*t))") define_sfop4(83,(x * (y - (z * w))),"t*(t-(t*t))") define_sfop4(84,(axn<T,2>(x,y) + axn<T,2>(z,w)),"") define_sfop4(85,(axn<T,3>(x,y) + axn<T,3>(z,w)),"") define_sfop4(86,(axn<T,4>(x,y) + axn<T,4>(z,w)),"") define_sfop4(87,(axn<T,5>(x,y) + axn<T,5>(z,w)),"") define_sfop4(88,(axn<T,6>(x,y) + axn<T,6>(z,w)),"") define_sfop4(89,(axn<T,7>(x,y) + axn<T,7>(z,w)),"") define_sfop4(90,(axn<T,8>(x,y) + axn<T,8>(z,w)),"") define_sfop4(91,(axn<T,9>(x,y) + axn<T,9>(z,w)),"") define_sfop4(92,((details::is_true(x) && details::is_true(y)) ? z : w),"") define_sfop4(93,((details::is_true(x) || details::is_true(y)) ? z : w),"") define_sfop4(94,((x < y) ? z : w),"") define_sfop4(95,((x <= y) ? z : w),"") define_sfop4(96,((x > y) ? z : w),"") define_sfop4(97,((x >= y) ? z : w),"") define_sfop4(98,(details::is_true(numeric::equal(x,y)) ? z : w),"") define_sfop4(99,(x * numeric::sin(y) + z * numeric::cos(w)),"") define_sfop4(ext00,((x + y) - (z * w)),"(t+t)-(t*t)") define_sfop4(ext01,((x + y) - (z / w)),"(t+t)-(t/t)") define_sfop4(ext02,((x + y) + (z * w)),"(t+t)+(t*t)") define_sfop4(ext03,((x + y) + (z / w)),"(t+t)+(t/t)") define_sfop4(ext04,((x - y) + (z * w)),"(t-t)+(t*t)") define_sfop4(ext05,((x - y) + (z / w)),"(t-t)+(t/t)") define_sfop4(ext06,((x - y) - (z * w)),"(t-t)-(t*t)") define_sfop4(ext07,((x - y) - (z / w)),"(t-t)-(t/t)") define_sfop4(ext08,((x + y) - (z - w)),"(t+t)-(t-t)") define_sfop4(ext09,((x + y) + (z - w)),"(t+t)+(t-t)") define_sfop4(ext10,((x + y) + (z + w)),"(t+t)+(t+t)") define_sfop4(ext11,((x + y) * (z - w)),"(t+t)*(t-t)") define_sfop4(ext12,((x + y) / (z - w)),"(t+t)/(t-t)") define_sfop4(ext13,((x - y) - (z + w)),"(t-t)-(t+t)") define_sfop4(ext14,((x - y) + (z + w)),"(t-t)+(t+t)") define_sfop4(ext15,((x - y) * (z + w)),"(t-t)*(t+t)") define_sfop4(ext16,((x - y) / (z + w)),"(t-t)/(t+t)") define_sfop4(ext17,((x * y) - (z + w)),"(t*t)-(t+t)") define_sfop4(ext18,((x / y) - (z + 
w)),"(t/t)-(t+t)") define_sfop4(ext19,((x * y) + (z + w)),"(t*t)+(t+t)") define_sfop4(ext20,((x / y) + (z + w)),"(t/t)+(t+t)") define_sfop4(ext21,((x * y) + (z - w)),"(t*t)+(t-t)") define_sfop4(ext22,((x / y) + (z - w)),"(t/t)+(t-t)") define_sfop4(ext23,((x * y) - (z - w)),"(t*t)-(t-t)") define_sfop4(ext24,((x / y) - (z - w)),"(t/t)-(t-t)") define_sfop4(ext25,((x + y) * (z * w)),"(t+t)*(t*t)") define_sfop4(ext26,((x + y) * (z / w)),"(t+t)*(t/t)") define_sfop4(ext27,((x + y) / (z * w)),"(t+t)/(t*t)") define_sfop4(ext28,((x + y) / (z / w)),"(t+t)/(t/t)") define_sfop4(ext29,((x - y) / (z * w)),"(t-t)/(t*t)") define_sfop4(ext30,((x - y) / (z / w)),"(t-t)/(t/t)") define_sfop4(ext31,((x - y) * (z * w)),"(t-t)*(t*t)") define_sfop4(ext32,((x - y) * (z / w)),"(t-t)*(t/t)") define_sfop4(ext33,((x * y) * (z + w)),"(t*t)*(t+t)") define_sfop4(ext34,((x / y) * (z + w)),"(t/t)*(t+t)") define_sfop4(ext35,((x * y) / (z + w)),"(t*t)/(t+t)") define_sfop4(ext36,((x / y) / (z + w)),"(t/t)/(t+t)") define_sfop4(ext37,((x * y) / (z - w)),"(t*t)/(t-t)") define_sfop4(ext38,((x / y) / (z - w)),"(t/t)/(t-t)") define_sfop4(ext39,((x * y) * (z - w)),"(t*t)*(t-t)") define_sfop4(ext40,((x * y) / (z * w)),"(t*t)/(t*t)") define_sfop4(ext41,((x / y) * (z / w)),"(t/t)*(t/t)") define_sfop4(ext42,((x / y) * (z - w)),"(t/t)*(t-t)") define_sfop4(ext43,((x * y) * (z * w)),"(t*t)*(t*t)") define_sfop4(ext44,(x + (y * (z / w))),"t+(t*(t/t))") define_sfop4(ext45,(x - (y * (z / w))),"t-(t*(t/t))") define_sfop4(ext46,(x + (y / (z * w))),"t+(t/(t*t))") define_sfop4(ext47,(x - (y / (z * w))),"t-(t/(t*t))") define_sfop4(ext48,(((x - y) - z) * w),"((t-t)-t)*t") define_sfop4(ext49,(((x - y) - z) / w),"((t-t)-t)/t") define_sfop4(ext50,(((x - y) + z) * w),"((t-t)+t)*t") define_sfop4(ext51,(((x - y) + z) / w),"((t-t)+t)/t") define_sfop4(ext52,((x + (y - z)) * w),"(t+(t-t))*t") define_sfop4(ext53,((x + (y - z)) / w),"(t+(t-t))/t") define_sfop4(ext54,((x + y) / (z + w)),"(t+t)/(t+t)") define_sfop4(ext55,((x - y) / (z - 
w)),"(t-t)/(t-t)") define_sfop4(ext56,((x + y) * (z + w)),"(t+t)*(t+t)") define_sfop4(ext57,((x - y) * (z - w)),"(t-t)*(t-t)") define_sfop4(ext58,((x - y) + (z - w)),"(t-t)+(t-t)") define_sfop4(ext59,((x - y) - (z - w)),"(t-t)-(t-t)") define_sfop4(ext60,((x / y) + (z * w)),"(t/t)+(t*t)") define_sfop4(ext61,(((x * y) * z) / w),"((t*t)*t)/t") #undef define_sfop3 #undef define_sfop4 template <typename T, typename SpecialFunction> class sf3_node : public trinary_node<T> { public: typedef expression_node<T>* expression_ptr; sf3_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1, expression_ptr branch2) : trinary_node<T>(opr, branch0, branch1, branch2) {} inline T value() const { const T x = trinary_node<T>::branch_[0].first->value(); const T y = trinary_node<T>::branch_[1].first->value(); const T z = trinary_node<T>::branch_[2].first->value(); return SpecialFunction::process(x, y, z); } }; template <typename T, typename SpecialFunction> class sf4_node : public quaternary_node<T> { public: typedef expression_node<T>* expression_ptr; sf4_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1, expression_ptr branch2, expression_ptr branch3) : quaternary_node<T>(opr, branch0, branch1, branch2, branch3) {} inline T value() const { const T x = quaternary_node<T>::branch_[0].first->value(); const T y = quaternary_node<T>::branch_[1].first->value(); const T z = quaternary_node<T>::branch_[2].first->value(); const T w = quaternary_node<T>::branch_[3].first->value(); return SpecialFunction::process(x, y, z, w); } }; template <typename T, typename SpecialFunction> class sf3_var_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; sf3_var_node(const T& v0, const T& v1, const T& v2) : v0_(v0), v1_(v1), v2_(v2) {} inline T value() const { return SpecialFunction::process(v0_, v1_, v2_); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_trinary; } private: 
/* NOTE: non-copyable (pre-C++11 idiom) — tail of sf3_var_node. */
sf3_var_node(sf3_var_node<T,SpecialFunction>&); sf3_var_node<T,SpecialFunction>& operator=(sf3_var_node<T,SpecialFunction>&); const T& v0_; const T& v1_; const T& v2_; };
/* Special-function node over four externally owned scalars: value() applies
   SpecialFunction directly to the referenced values v0_..v3_, with no branch
   evaluation and no ownership of the referents. */
template <typename T, typename SpecialFunction> class sf4_var_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; sf4_var_node(const T& v0, const T& v1, const T& v2, const T& v3) : v0_(v0), v1_(v1), v2_(v2), v3_(v3) {} inline T value() const { return SpecialFunction::process(v0_, v1_, v2_, v3_); }
/* NOTE(review): reports e_trinary despite having four operands — this mirrors
   sf3_var_node above; confirm against upstream exprtk before relying on the
   reported node type. */
inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_trinary; } private:
/* non-copyable (pre-C++11 idiom) */
sf4_var_node(sf4_var_node<T,SpecialFunction>&); sf4_var_node<T,SpecialFunction>& operator=(sf4_var_node<T,SpecialFunction>&); const T& v0_; const T& v1_; const T& v2_; const T& v3_; };
/* N-ary node over an arbitrary list of expression branches, folded by
   VarArgFunction::process at evaluation time. delete_branch_ records, per
   branch, whether the destructor owns (and must destroy) it; a null entry in
   arg_list aborts construction, leaving both vectors empty. */
template <typename T, typename VarArgFunction> class vararg_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; template <typename Allocator, template <typename,typename> class Sequence> vararg_node(const Sequence<expression_ptr,Allocator>& arg_list) { arg_list_ .resize(arg_list.size()); delete_branch_.resize(arg_list.size()); for (std::size_t i = 0; i < arg_list.size(); ++i) { if (arg_list[i]) { arg_list_[i] = arg_list[i]; delete_branch_[i] = static_cast<unsigned char>(branch_deletable(arg_list_[i]) ? 
1 : 0); } else { arg_list_.clear(); delete_branch_.clear(); return; } } } ~vararg_node() { for (std::size_t i = 0; i < arg_list_.size(); ++i) { if (arg_list_[i] && delete_branch_[i]) { destroy_node(arg_list_[i]); } } } inline T value() const { if (!arg_list_.empty()) return VarArgFunction::process(arg_list_); else return std::numeric_limits<T>::quiet_NaN(); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vararg; } private: std::vector<expression_ptr> arg_list_; std::vector<unsigned char> delete_branch_; }; template <typename T, typename VarArgFunction> class vararg_varnode : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; template <typename Allocator, template <typename,typename> class Sequence> vararg_varnode(const Sequence<expression_ptr,Allocator>& arg_list) { arg_list_.resize(arg_list.size()); for (std::size_t i = 0; i < arg_list.size(); ++i) { if (arg_list[i] && is_variable_node(arg_list[i])) { variable_node<T>* var_node_ptr = static_cast<variable_node<T>*>(arg_list[i]); arg_list_[i] = (&var_node_ptr->ref()); } else { arg_list_.clear(); return; } } } inline T value() const { if (!arg_list_.empty()) return VarArgFunction::process(arg_list_); else return std::numeric_limits<T>::quiet_NaN(); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vararg; } private: std::vector<const T*> arg_list_; }; template <typename T, typename VecFunction> class vectorize_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; vectorize_node(const expression_ptr v) : ivec_ptr_(0), v_(v), v_deletable_(branch_deletable(v_)) { if (is_ivector_node(v)) { ivec_ptr_ = dynamic_cast<vector_interface<T>*>(v); } else ivec_ptr_ = 0; } ~vectorize_node() { if (v_ && v_deletable_) { destroy_node(v_); } } inline T value() const { if (ivec_ptr_) { v_->value(); return VecFunction::process(ivec_ptr_); } else return std::numeric_limits<T>::quiet_NaN(); } 
/* (tail of vectorize_node: node type and data members) */
inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vecfunc; } private: vector_interface<T>* ivec_ptr_; expression_ptr v_; const bool v_deletable_; };
/* Scalar assignment node (x := expr). The LHS variable_node is resolved once at
   construction; value() evaluates the RHS branch, stores the result through the
   cached reference, and returns it. Returns quiet_NaN if the LHS was not a
   variable node (var_node_ptr_ stays null). */
template <typename T> class assignment_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; assignment_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), var_node_ptr_(0) { if (is_variable_node(binary_node<T>::branch_[0].first)) { var_node_ptr_ = static_cast<variable_node<T>*>(binary_node<T>::branch_[0].first); } } inline T value() const { if (var_node_ptr_) { T& result = var_node_ptr_->ref(); result = binary_node<T>::branch_[1].first->value(); return result; } else return std::numeric_limits<T>::quiet_NaN(); } private: variable_node<T>* var_node_ptr_; };
/* Same cached-LHS assignment pattern for a single vector element
   (v[i] := expr), via a vector_elem_node. */
template <typename T> class assignment_vec_elem_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; assignment_vec_elem_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec_node_ptr_(0) { if (is_vector_elem_node(binary_node<T>::branch_[0].first)) { vec_node_ptr_ = static_cast<vector_elem_node<T>*>(binary_node<T>::branch_[0].first); } } inline T value() const { if (vec_node_ptr_) { T& result = vec_node_ptr_->ref(); result = binary_node<T>::branch_[1].first->value(); return result; } else return std::numeric_limits<T>::quiet_NaN(); } private: vector_elem_node<T>* vec_node_ptr_; };
/* Same assignment pattern for a rebase-vector element LHS. */
template <typename T> class assignment_rebasevec_elem_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; assignment_rebasevec_elem_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), rbvec_node_ptr_(0) { if (is_rebasevector_elem_node(binary_node<T>::branch_[0].first)) { rbvec_node_ptr_ = 
static_cast<rebasevector_elem_node<T>*>(binary_node<T>::branch_[0].first); } } inline T value() const { if (rbvec_node_ptr_) { T& result = rbvec_node_ptr_->ref(); result = binary_node<T>::branch_[1].first->value(); return result; } else return std::numeric_limits<T>::quiet_NaN(); } private: rebasevector_elem_node<T>* rbvec_node_ptr_; }; template <typename T> class assignment_rebasevec_celem_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; assignment_rebasevec_celem_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), rbvec_node_ptr_(0) { if (is_rebasevector_celem_node(binary_node<T>::branch_[0].first)) { rbvec_node_ptr_ = static_cast<rebasevector_celem_node<T>*>(binary_node<T>::branch_[0].first); } } inline T value() const { if (rbvec_node_ptr_) { T& result = rbvec_node_ptr_->ref(); result = binary_node<T>::branch_[1].first->value(); return result; } else return std::numeric_limits<T>::quiet_NaN(); } private: rebasevector_celem_node<T>* rbvec_node_ptr_; }; template <typename T> class assignment_vec_node : public binary_node <T>, public vector_interface<T> { public: typedef expression_node<T>* expression_ptr; typedef vector_node<T>* vector_node_ptr; typedef vec_data_store<T> vds_t; assignment_vec_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec_node_ptr_(0) { if (is_vector_node(binary_node<T>::branch_[0].first)) { vec_node_ptr_ = static_cast<vector_node<T>*>(binary_node<T>::branch_[0].first); vds() = vec_node_ptr_->vds(); } } inline T value() const { if (vec_node_ptr_) { const T v = binary_node<T>::branch_[1].first->value(); T* vec = vds().data(); loop_unroll::details lud(size()); const T* upper_bound = vec + lud.upper_bound; while (vec < upper_bound) { #define exprtk_loop(N) \ vec[N] = v; \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef 
exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec += lud.batch_size; } exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : *vec++ = v; \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return vec_node_ptr_->value(); } else return std::numeric_limits<T>::quiet_NaN(); } vector_node_ptr vec() const { return vec_node_ptr_; } vector_node_ptr vec() { return vec_node_ptr_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vecvalass; } std::size_t size() const { return vds().size(); } vds_t& vds() { return vds_; } const vds_t& vds() const { return vds_; } private: vector_node<T>* vec_node_ptr_; vds_t vds_; }; template <typename T> class assignment_vecvec_node : public binary_node <T>, public vector_interface<T> { public: typedef expression_node<T>* expression_ptr; typedef vector_node<T>* vector_node_ptr; typedef vec_data_store<T> vds_t; assignment_vecvec_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec0_node_ptr_(0), vec1_node_ptr_(0), initialised_(false), src_is_ivec_(false) { if (is_vector_node(binary_node<T>::branch_[0].first)) { vec0_node_ptr_ = static_cast<vector_node<T>*>(binary_node<T>::branch_[0].first); vds() = vec0_node_ptr_->vds(); } if (is_vector_node(binary_node<T>::branch_[1].first)) { vec1_node_ptr_ = static_cast<vector_node<T>*>(binary_node<T>::branch_[1].first); vds_t::match_sizes(vds(),vec1_node_ptr_->vds()); } else if 
(is_ivector_node(binary_node<T>::branch_[1].first)) { vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0); if (0 != (vi = dynamic_cast<vector_interface<T>*>(binary_node<T>::branch_[1].first))) { vec1_node_ptr_ = vi->vec(); if (!vi->side_effect()) { vi->vds() = vds(); src_is_ivec_ = true; } else vds_t::match_sizes(vds(),vi->vds()); } } initialised_ = (vec0_node_ptr_ && vec1_node_ptr_); } inline T value() const { if (initialised_) { binary_node<T>::branch_[1].first->value(); if (src_is_ivec_) return vec0_node_ptr_->value(); T* vec0 = vec0_node_ptr_->vds().data(); T* vec1 = vec1_node_ptr_->vds().data(); loop_unroll::details lud(size()); const T* upper_bound = vec0 + lud.upper_bound; while (vec0 < upper_bound) { #define exprtk_loop(N) \ vec0[N] = vec1[N]; \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec0 += lud.batch_size; vec1 += lud.batch_size; } exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : *vec0++ = *vec1++; \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return vec0_node_ptr_->value(); } else return std::numeric_limits<T>::quiet_NaN(); } vector_node_ptr vec() const { return vec0_node_ptr_; } vector_node_ptr vec() { return vec0_node_ptr_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vecvecass; } std::size_t size() const { return vds().size(); } vds_t& vds() { return vds_; } const vds_t& vds() const { return vds_; } private: 
/* (tail of assignment_vecvec_node: data members) */
vector_node<T>* vec0_node_ptr_; vector_node<T>* vec1_node_ptr_; bool initialised_; bool src_is_ivec_; vds_t vds_; };
/* Compound assignment to a scalar variable (e.g. x += expr): value() combines
   the current value with the evaluated RHS via Operation::process and writes
   the result back through the cached variable reference, returning it.
   quiet_NaN if the LHS was not a variable node. */
template <typename T, typename Operation> class assignment_op_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; assignment_op_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), var_node_ptr_(0) { if (is_variable_node(binary_node<T>::branch_[0].first)) { var_node_ptr_ = static_cast<variable_node<T>*>(binary_node<T>::branch_[0].first); } } inline T value() const { if (var_node_ptr_) { T& v = var_node_ptr_->ref(); v = Operation::process(v,binary_node<T>::branch_[1].first->value()); return v; } else return std::numeric_limits<T>::quiet_NaN(); } private: variable_node<T>* var_node_ptr_; };
/* Compound assignment to a single vector element (v[i] op= expr). */
template <typename T, typename Operation> class assignment_vec_elem_op_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; assignment_vec_elem_op_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec_node_ptr_(0) { if (is_vector_elem_node(binary_node<T>::branch_[0].first)) { vec_node_ptr_ = static_cast<vector_elem_node<T>*>(binary_node<T>::branch_[0].first); } } inline T value() const { if (vec_node_ptr_) { T& v = vec_node_ptr_->ref(); v = Operation::process(v,binary_node<T>::branch_[1].first->value()); return v; } else return std::numeric_limits<T>::quiet_NaN(); } private: vector_elem_node<T>* vec_node_ptr_; };
/* Compound assignment to a rebase-vector element. */
template <typename T, typename Operation> class assignment_rebasevec_elem_op_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; assignment_rebasevec_elem_op_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), rbvec_node_ptr_(0) { if (is_rebasevector_elem_node(binary_node<T>::branch_[0].first)) { rbvec_node_ptr_ = 
static_cast<rebasevector_elem_node<T>*>(binary_node<T>::branch_[0].first); } } inline T value() const { if (rbvec_node_ptr_) { T& v = rbvec_node_ptr_->ref(); v = Operation::process(v,binary_node<T>::branch_[1].first->value()); return v; } else return std::numeric_limits<T>::quiet_NaN(); } private: rebasevector_elem_node<T>* rbvec_node_ptr_; }; template <typename T, typename Operation> class assignment_rebasevec_celem_op_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; assignment_rebasevec_celem_op_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), rbvec_node_ptr_(0) { if (is_rebasevector_celem_node(binary_node<T>::branch_[0].first)) { rbvec_node_ptr_ = static_cast<rebasevector_celem_node<T>*>(binary_node<T>::branch_[0].first); } } inline T value() const { if (rbvec_node_ptr_) { T& v = rbvec_node_ptr_->ref(); v = Operation::process(v,binary_node<T>::branch_[1].first->value()); return v; } else return std::numeric_limits<T>::quiet_NaN(); } private: rebasevector_celem_node<T>* rbvec_node_ptr_; }; template <typename T, typename Operation> class assignment_vec_op_node : public binary_node <T>, public vector_interface<T> { public: typedef expression_node<T>* expression_ptr; typedef vector_node<T>* vector_node_ptr; typedef vec_data_store<T> vds_t; assignment_vec_op_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec_node_ptr_(0) { if (is_vector_node(binary_node<T>::branch_[0].first)) { vec_node_ptr_ = static_cast<vector_node<T>*>(binary_node<T>::branch_[0].first); vds() = vec_node_ptr_->vds(); } } inline T value() const { if (vec_node_ptr_) { const T v = binary_node<T>::branch_[1].first->value(); T* vec = vds().data(); loop_unroll::details lud(size()); const T* upper_bound = vec + lud.upper_bound; while (vec < upper_bound) { #define exprtk_loop(N) \ Operation::assign(vec[N],v); \ exprtk_loop( 0) 
exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec += lud.batch_size; } exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : Operation::assign(*vec++,v); \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return vec_node_ptr_->value(); } else return std::numeric_limits<T>::quiet_NaN(); } vector_node_ptr vec() const { return vec_node_ptr_; } vector_node_ptr vec() { return vec_node_ptr_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vecopvalass; } std::size_t size() const { return vds().size(); } vds_t& vds() { return vds_; } const vds_t& vds() const { return vds_; } bool side_effect() const { return true; } private: vector_node<T>* vec_node_ptr_; vds_t vds_; }; template <typename T, typename Operation> class assignment_vecvec_op_node : public binary_node <T>, public vector_interface<T> { public: typedef expression_node<T>* expression_ptr; typedef vector_node<T>* vector_node_ptr; typedef vec_data_store<T> vds_t; assignment_vecvec_op_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec0_node_ptr_(0), vec1_node_ptr_(0), initialised_(false) { if (is_vector_node(binary_node<T>::branch_[0].first)) { vec0_node_ptr_ = static_cast<vector_node<T>*>(binary_node<T>::branch_[0].first); vds() = vec0_node_ptr_->vds(); } if (is_vector_node(binary_node<T>::branch_[1].first)) { vec1_node_ptr_ = 
static_cast<vector_node<T>*>(binary_node<T>::branch_[1].first); vec1_node_ptr_->vds() = vds(); } else if (is_ivector_node(binary_node<T>::branch_[1].first)) { vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0); if (0 != (vi = dynamic_cast<vector_interface<T>*>(binary_node<T>::branch_[1].first))) { vec1_node_ptr_ = vi->vec(); vec1_node_ptr_->vds() = vds(); } else vds_t::match_sizes(vds(),vec1_node_ptr_->vds()); } initialised_ = (vec0_node_ptr_ && vec1_node_ptr_); } inline T value() const { if (initialised_) { binary_node<T>::branch_[0].first->value(); binary_node<T>::branch_[1].first->value(); T* vec0 = vec0_node_ptr_->vds().data(); T* vec1 = vec1_node_ptr_->vds().data(); loop_unroll::details lud(size()); const T* upper_bound = vec0 + lud.upper_bound; while (vec0 < upper_bound) { #define exprtk_loop(N) \ vec0[N] = Operation::process(vec0[N],vec1[N]); \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec0 += lud.batch_size; vec1 += lud.batch_size; } int i = 0; exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : { vec0[i] = Operation::process(vec0[i],vec1[i]); ++i; } \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return vec0_node_ptr_->value(); } else return std::numeric_limits<T>::quiet_NaN(); } vector_node_ptr vec() const { return vec0_node_ptr_; } vector_node_ptr vec() { return vec0_node_ptr_; } inline typename expression_node<T>::node_type type() const { return 
expression_node<T>::e_vecopvecass; } std::size_t size() const { return vds().size(); } vds_t& vds() { return vds_; } const vds_t& vds() const { return vds_; } bool side_effect() const { return true; } private: vector_node<T>* vec0_node_ptr_; vector_node<T>* vec1_node_ptr_; bool initialised_; vds_t vds_; }; /* vec_binop_vecvec_node<T,Op>: element-wise binary operation over two vectors written into an internally owned temporary vector (temp_vec_node_); the result length is the smaller operand length. value() returns element 0 of the temporary. */ template <typename T, typename Operation> class vec_binop_vecvec_node : public binary_node <T>, public vector_interface<T> { public: typedef expression_node<T>* expression_ptr; typedef vector_node<T>* vector_node_ptr; typedef vector_holder<T>* vector_holder_ptr; typedef vec_data_store<T> vds_t; vec_binop_vecvec_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec0_node_ptr_(0), vec1_node_ptr_(0), temp_ (0), temp_vec_node_(0), initialised_(false) { bool v0_is_ivec = false; bool v1_is_ivec = false; if (is_vector_node(binary_node<T>::branch_[0].first)) { vec0_node_ptr_ = static_cast<vector_node_ptr>(binary_node<T>::branch_[0].first); } else if (is_ivector_node(binary_node<T>::branch_[0].first)) { vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0); if (0 != (vi = dynamic_cast<vector_interface<T>*>(binary_node<T>::branch_[0].first))) { vec0_node_ptr_ = vi->vec(); v0_is_ivec = true; } } if (is_vector_node(binary_node<T>::branch_[1].first)) { vec1_node_ptr_ = static_cast<vector_node_ptr>(binary_node<T>::branch_[1].first); } else if (is_ivector_node(binary_node<T>::branch_[1].first)) { vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0); if (0 != (vi = dynamic_cast<vector_interface<T>*>(binary_node<T>::branch_[1].first))) { vec1_node_ptr_ = vi->vec(); v1_is_ivec = true; } } if (vec0_node_ptr_ && vec1_node_ptr_) { vector_holder<T>& vec0 = vec0_node_ptr_->vec_holder(); vector_holder<T>& vec1 = vec1_node_ptr_->vec_holder(); if (v0_is_ivec && (vec0.size() <= vec1.size())) vds_ = vds_t(vec0_node_ptr_->vds()); else if (v1_is_ivec && (vec1.size() <= vec0.size())) vds_ = 
vds_t(vec1_node_ptr_->vds()); else vds_ = vds_t(std::min(vec0.size(),vec1.size())); temp_ = new vector_holder<T>(vds().data(),vds().size()); temp_vec_node_ = new vector_node<T> (vds(),temp_); initialised_ = true; } } ~vec_binop_vecvec_node() { delete temp_; delete temp_vec_node_; } inline T value() const { if (initialised_) { binary_node<T>::branch_[0].first->value(); binary_node<T>::branch_[1].first->value(); T* vec0 = vec0_node_ptr_->vds().data(); T* vec1 = vec1_node_ptr_->vds().data(); T* vec2 = vds().data(); loop_unroll::details lud(size()); const T* upper_bound = vec2 + lud.upper_bound; while (vec2 < upper_bound) { #define exprtk_loop(N) \ vec2[N] = Operation::process(vec0[N],vec1[N]); \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec0 += lud.batch_size; vec1 += lud.batch_size; vec2 += lud.batch_size; } int i = 0; exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : { vec2[i] = Operation::process(vec0[i],vec1[i]); ++i; } \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return (vds().data())[0]; } else return std::numeric_limits<T>::quiet_NaN(); } vector_node_ptr vec() const { return temp_vec_node_; } vector_node_ptr vec() { return temp_vec_node_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vecvecarith; } std::size_t size() const { return vds_.size(); } vds_t& vds() { return vds_; } const vds_t& vds() const { return vds_; } private: 
vector_node_ptr vec0_node_ptr_; vector_node_ptr vec1_node_ptr_; vector_holder_ptr temp_; vector_node_ptr temp_vec_node_; bool initialised_; vds_t vds_; }; /* vec_binop_vecval_node<T,Op>: element-wise vector-op-scalar into an internally owned temporary vector; the scalar RHS branch is evaluated once per value() call. */ template <typename T, typename Operation> class vec_binop_vecval_node : public binary_node <T>, public vector_interface<T> { public: typedef expression_node<T>* expression_ptr; typedef vector_node<T>* vector_node_ptr; typedef vector_holder<T>* vector_holder_ptr; typedef vec_data_store<T> vds_t; vec_binop_vecval_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec0_node_ptr_(0), temp_ (0), temp_vec_node_(0) { bool v0_is_ivec = false; if (is_vector_node(binary_node<T>::branch_[0].first)) { vec0_node_ptr_ = static_cast<vector_node_ptr>(binary_node<T>::branch_[0].first); } else if (is_ivector_node(binary_node<T>::branch_[0].first)) { vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0); if (0 != (vi = dynamic_cast<vector_interface<T>*>(binary_node<T>::branch_[0].first))) { vec0_node_ptr_ = vi->vec(); v0_is_ivec = true; } } if (vec0_node_ptr_) { if (v0_is_ivec) vds() = vec0_node_ptr_->vds(); else vds() = vds_t(vec0_node_ptr_->size()); temp_ = new vector_holder<T>(vds()); temp_vec_node_ = new vector_node<T> (vds(),temp_); } } ~vec_binop_vecval_node() { delete temp_; delete temp_vec_node_; } inline T value() const { if (vec0_node_ptr_) { binary_node<T>::branch_[0].first->value(); const T v = binary_node<T>::branch_[1].first->value(); T* vec0 = vec0_node_ptr_->vds().data(); T* vec1 = vds().data(); loop_unroll::details lud(size()); const T* upper_bound = vec0 + lud.upper_bound; while (vec0 < upper_bound) { #define exprtk_loop(N) \ vec1[N] = Operation::process(vec0[N],v); \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) 
exprtk_loop(14) exprtk_loop(15) #endif vec0 += lud.batch_size; vec1 += lud.batch_size; } int i = 0; exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : { vec1[i] = Operation::process(vec0[i],v); ++i; } \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return (vds().data())[0]; } else return std::numeric_limits<T>::quiet_NaN(); } vector_node_ptr vec() const { return temp_vec_node_; } vector_node_ptr vec() { return temp_vec_node_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vecvalarith; } std::size_t size() const { return vds().size(); } vds_t& vds() { return vds_; } const vds_t& vds() const { return vds_; } private: vector_node_ptr vec0_node_ptr_; vector_holder_ptr temp_; vector_node_ptr temp_vec_node_; vds_t vds_; }; /* vec_binop_valvec_node<T,Op>: element-wise scalar-op-vector into an internally owned temporary vector (mirror image of vec_binop_vecval_node). */ template <typename T, typename Operation> class vec_binop_valvec_node : public binary_node <T>, public vector_interface<T> { public: typedef expression_node<T>* expression_ptr; typedef vector_node<T>* vector_node_ptr; typedef vector_holder<T>* vector_holder_ptr; typedef vec_data_store<T> vds_t; vec_binop_valvec_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), vec1_node_ptr_(0), temp_ (0), temp_vec_node_(0) { bool v1_is_ivec = false; if (is_vector_node(binary_node<T>::branch_[1].first)) { vec1_node_ptr_ = static_cast<vector_node_ptr>(binary_node<T>::branch_[1].first); } else if (is_ivector_node(binary_node<T>::branch_[1].first)) { vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0); if (0 != (vi = dynamic_cast<vector_interface<T>*>(binary_node<T>::branch_[1].first))) { vec1_node_ptr_ = vi->vec(); v1_is_ivec = 
true; } } if (vec1_node_ptr_) { if (v1_is_ivec) vds() = vec1_node_ptr_->vds(); else vds() = vds_t(vec1_node_ptr_->size()); temp_ = new vector_holder<T>(vds()); temp_vec_node_ = new vector_node<T> (vds(),temp_); } } ~vec_binop_valvec_node() { delete temp_; delete temp_vec_node_; } inline T value() const { if (vec1_node_ptr_) { const T v = binary_node<T>::branch_[0].first->value(); binary_node<T>::branch_[1].first->value(); T* vec0 = vds().data(); T* vec1 = vec1_node_ptr_->vds().data(); loop_unroll::details lud(size()); const T* upper_bound = vec0 + lud.upper_bound; while (vec0 < upper_bound) { #define exprtk_loop(N) \ vec0[N] = Operation::process(v,vec1[N]); \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec0 += lud.batch_size; vec1 += lud.batch_size; } int i = 0; exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : { vec0[i] = Operation::process(v,vec1[i]); ++i; } \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return (vds().data())[0]; } else return std::numeric_limits<T>::quiet_NaN(); } vector_node_ptr vec() const { return temp_vec_node_; } vector_node_ptr vec() { return temp_vec_node_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vecvalarith; } std::size_t size() const { return vds().size(); } vds_t& vds() { return vds_; } const vds_t& vds() const { return vds_; } private: vector_node_ptr vec1_node_ptr_; vector_holder_ptr temp_; vector_node_ptr 
temp_vec_node_; vds_t vds_; }; /* unary_vector_node<T,Op>: element-wise unary operation over a vector into an internally owned temporary vector; value() returns element 0 of the temporary, or NaN when no vector operand was resolved. */ template <typename T, typename Operation> class unary_vector_node : public unary_node <T>, public vector_interface<T> { public: typedef expression_node<T>* expression_ptr; typedef vector_node<T>* vector_node_ptr; typedef vector_holder<T>* vector_holder_ptr; typedef vec_data_store<T> vds_t; unary_vector_node(const operator_type& opr, expression_ptr branch0) : unary_node<T>(opr, branch0), vec0_node_ptr_(0), temp_ (0), temp_vec_node_(0) { bool vec0_is_ivec = false; if (is_vector_node(unary_node<T>::branch_)) { vec0_node_ptr_ = static_cast<vector_node_ptr>(unary_node<T>::branch_); } else if (is_ivector_node(unary_node<T>::branch_)) { vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0); if (0 != (vi = dynamic_cast<vector_interface<T>*>(unary_node<T>::branch_))) { vec0_node_ptr_ = vi->vec(); vec0_is_ivec = true; } } if (vec0_node_ptr_) { if (vec0_is_ivec) vds_ = vec0_node_ptr_->vds(); else vds_ = vds_t(vec0_node_ptr_->size()); temp_ = new vector_holder<T>(vds()); temp_vec_node_ = new vector_node<T> (vds(),temp_); } } ~unary_vector_node() { delete temp_; delete temp_vec_node_; } inline T value() const { unary_node<T>::branch_->value(); if (vec0_node_ptr_) { T* vec0 = vec0_node_ptr_->vds().data(); T* vec1 = vds().data(); loop_unroll::details lud(size()); const T* upper_bound = vec0 + lud.upper_bound; while (vec0 < upper_bound) { #define exprtk_loop(N) \ vec1[N] = Operation::process(vec0[N]); \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec0 += lud.batch_size; vec1 += lud.batch_size; } int i = 0; exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : { vec1[i] = Operation::process(vec0[i]); ++i; } \ #ifndef 
exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return (vds().data())[0]; } else return std::numeric_limits<T>::quiet_NaN(); } vector_node_ptr vec() const { return temp_vec_node_; } vector_node_ptr vec() { return temp_vec_node_; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vecunaryop; } std::size_t size() const { return vds().size(); } vds_t& vds() { return vds_; } const vds_t& vds() const { return vds_; } private: vector_node_ptr vec0_node_ptr_; vector_holder_ptr temp_; vector_node_ptr temp_vec_node_; vds_t vds_; }; /* scand_node<T>: logical AND; yields T(1) iff both branches are non-zero — the RHS branch is not evaluated when the LHS is zero (the && below short-circuits). */ template <typename T> class scand_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; scand_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1) {} inline T value() const { return ( std::not_equal_to<T>() (T(0),binary_node<T>::branch_[0].first->value()) && std::not_equal_to<T>() (T(0),binary_node<T>::branch_[1].first->value()) ) ? T(1) : T(0); } }; /* scor_node<T>: logical OR; yields T(1) if either branch is non-zero — the RHS branch is not evaluated when the LHS is already non-zero (|| short-circuits). */ template <typename T> class scor_node : public binary_node<T> { public: typedef expression_node<T>* expression_ptr; scor_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1) {} inline T value() const { return ( std::not_equal_to<T>() (T(0),binary_node<T>::branch_[0].first->value()) || std::not_equal_to<T>() (T(0),binary_node<T>::branch_[1].first->value()) ) ? T(1) : T(0); } }; /* function_N_node<T,IFunction,N>: adapter invoking a user ifunction of exactly N scalar parameters; function_ is left null when the function's arity differs from N, in which case value() yields NaN. */ template <typename T, typename IFunction, std::size_t N> class function_N_node : public expression_node<T> { public: // Function of N parameters. 
typedef expression_node<T>* expression_ptr; typedef std::pair<expression_ptr,bool> branch_t; typedef IFunction ifunction; function_N_node(ifunction* func) : function_((N == func->param_count) ? func : reinterpret_cast<ifunction*>(0)), parameter_count_(func->param_count) {} ~function_N_node() { cleanup_branches::execute<T,N>(branch_); } template <std::size_t NumBranches> bool init_branches(expression_ptr (&b)[NumBranches]) { // Needed for incompetent and broken msvc compiler versions #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable: 4127) #endif if (N != NumBranches) return false; else { for (std::size_t i = 0; i < NumBranches; ++i) { if (b[i]) branch_[i] = std::make_pair(b[i],branch_deletable(b[i])); else return false; } return true; } #ifdef _MSC_VER #pragma warning(pop) #endif } inline bool operator <(const function_N_node<T,IFunction,N>& fn) const { return this < (&fn); } inline T value() const { // Needed for incompetent and broken msvc compiler versions #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable: 4127) #endif if ((0 == function_) || (0 == N)) return std::numeric_limits<T>::quiet_NaN(); else { T v[N]; evaluate_branches<T,N>::execute(v,branch_); return invoke<T,N>::execute(*function_,v); } #ifdef _MSC_VER #pragma warning(pop) #endif } /* evaluate_branches: evaluates each branch into the stack array v; specialisations for arities 1..5 are fully unrolled. */ template <typename T_, std::size_t BranchCount> struct evaluate_branches { static inline void execute(T_ (&v)[BranchCount], const branch_t (&b)[BranchCount]) { for (std::size_t i = 0; i < BranchCount; ++i) { v[i] = b[i].first->value(); } } }; template <typename T_> struct evaluate_branches <T_,5> { static inline void execute(T_ (&v)[5], const branch_t (&b)[5]) { v[0] = b[0].first->value(); v[1] = b[1].first->value(); v[2] = b[2].first->value(); v[3] = b[3].first->value(); v[4] = b[4].first->value(); } }; template <typename T_> struct evaluate_branches <T_,4> { static inline void execute(T_ (&v)[4], const branch_t (&b)[4]) { v[0] = b[0].first->value(); v[1] = b[1].first->value(); v[2] = 
b[2].first->value(); v[3] = b[3].first->value(); } }; template <typename T_> struct evaluate_branches <T_,3> { static inline void execute(T_ (&v)[3], const branch_t (&b)[3]) { v[0] = b[0].first->value(); v[1] = b[1].first->value(); v[2] = b[2].first->value(); } }; template <typename T_> struct evaluate_branches <T_,2> { static inline void execute(T_ (&v)[2], const branch_t (&b)[2]) { v[0] = b[0].first->value(); v[1] = b[1].first->value(); } }; template <typename T_> struct evaluate_branches <T_,1> { static inline void execute(T_ (&v)[1], const branch_t (&b)[1]) { v[0] = b[0].first->value(); } }; /* invoke: dispatches the evaluated argument array to the user function; one specialisation per supported arity (1..20). */ template <typename T_, std::size_t ParamCount> struct invoke { static inline T execute(ifunction&, branch_t (&)[ParamCount]) { return std::numeric_limits<T_>::quiet_NaN(); } }; template <typename T_> struct invoke<T_,20> { static inline T_ execute(ifunction& f, T_ (&v)[20]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11],v[12],v[13],v[14],v[15],v[16],v[17],v[18],v[19]); } }; template <typename T_> struct invoke<T_,19> { static inline T_ execute(ifunction& f, T_ (&v)[19]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11],v[12],v[13],v[14],v[15],v[16],v[17],v[18]); } }; template <typename T_> struct invoke<T_,18> { static inline T_ execute(ifunction& f, T_ (&v)[18]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11],v[12],v[13],v[14],v[15],v[16],v[17]); } }; template <typename T_> struct invoke<T_,17> { static inline T_ execute(ifunction& f, T_ (&v)[17]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11],v[12],v[13],v[14],v[15],v[16]); } }; template <typename T_> struct invoke<T_,16> { static inline T_ execute(ifunction& f, T_ (&v)[16]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11],v[12],v[13],v[14],v[15]); } }; template <typename T_> struct invoke<T_,15> { static inline T_ execute(ifunction& f, T_ (&v)[15]) { return 
f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11],v[12],v[13],v[14]); } }; template <typename T_> struct invoke<T_,14> { static inline T_ execute(ifunction& f, T_ (&v)[14]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11],v[12],v[13]); } }; template <typename T_> struct invoke<T_,13> { static inline T_ execute(ifunction& f, T_ (&v)[13]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11],v[12]); } }; template <typename T_> struct invoke<T_,12> { static inline T_ execute(ifunction& f, T_ (&v)[12]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10],v[11]); } }; template <typename T_> struct invoke<T_,11> { static inline T_ execute(ifunction& f, T_ (&v)[11]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9],v[10]); } }; template <typename T_> struct invoke<T_,10> { static inline T_ execute(ifunction& f, T_ (&v)[10]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8],v[9]); } }; template <typename T_> struct invoke<T_,9> { static inline T_ execute(ifunction& f, T_ (&v)[9]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7],v[8]); } }; template <typename T_> struct invoke<T_,8> { static inline T_ execute(ifunction& f, T_ (&v)[8]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6],v[7]); } }; template <typename T_> struct invoke<T_,7> { static inline T_ execute(ifunction& f, T_ (&v)[7]) { return f(v[0],v[1],v[2],v[3],v[4],v[5],v[6]); } }; template <typename T_> struct invoke<T_,6> { static inline T_ execute(ifunction& f, T_ (&v)[6]) { return f(v[0],v[1],v[2],v[3],v[4],v[5]); } }; template <typename T_> struct invoke<T_,5> { static inline T_ execute(ifunction& f, T_ (&v)[5]) { return f(v[0],v[1],v[2],v[3],v[4]); } }; template <typename T_> struct invoke<T_,4> { static inline T_ execute(ifunction& f, T_ (&v)[4]) { return f(v[0],v[1],v[2],v[3]); } }; template <typename T_> struct invoke<T_,3> { static inline T_ execute(ifunction& f, T_ (&v)[3]) { return f(v[0],v[1],v[2]); } }; template 
<typename T_> struct invoke<T_,2> { static inline T_ execute(ifunction& f, T_ (&v)[2]) { return f(v[0],v[1]); } }; template <typename T_> struct invoke<T_,1> { static inline T_ execute(ifunction& f, T_ (&v)[1]) { return f(v[0]); } }; inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_function; } private: ifunction* function_; std::size_t parameter_count_; branch_t branch_[N]; }; /* Specialisation for N == 0: invokes the user function with no arguments; NaN when the function's arity is not zero. */ template <typename T, typename IFunction> class function_N_node<T,IFunction,0> : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; typedef IFunction ifunction; function_N_node(ifunction* func) : function_((0 == func->param_count) ? func : reinterpret_cast<ifunction*>(0)) {} inline bool operator <(const function_N_node<T,IFunction,0>& fn) const { return this < (&fn); } inline T value() const { if (function_) return (*function_)(); else return std::numeric_limits<T>::quiet_NaN(); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_function; } private: ifunction* function_; }; /* vararg_function_node<T,VarArgFunction>: evaluates every argument expression into value_list_ then invokes the variable-argument function; the destructor releases the non-variable argument nodes it owns. */ template <typename T, typename VarArgFunction> class vararg_function_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; vararg_function_node(VarArgFunction* func, const std::vector<expression_ptr>& arg_list) : function_(func), arg_list_(arg_list) { value_list_.resize(arg_list.size(),std::numeric_limits<T>::quiet_NaN()); } ~vararg_function_node() { for (std::size_t i = 0; i < arg_list_.size(); ++i) { if (arg_list_[i] && !details::is_variable_node(arg_list_[i])) { destroy_node(arg_list_[i]); } } } inline bool operator <(const vararg_function_node<T,VarArgFunction>& fn) const { return this < (&fn); } inline T value() const { if (function_) { populate_value_list(); return (*function_)(value_list_); } else return std::numeric_limits<T>::quiet_NaN(); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_vafunction; } private: inline void 
populate_value_list() const { for (std::size_t i = 0; i < arg_list_.size(); ++i) { value_list_[i] = arg_list_[i]->value(); } } VarArgFunction* function_; std::vector<expression_ptr> arg_list_; mutable std::vector<T> value_list_; }; template <typename T, typename GenericFunction> class generic_function_node : public expression_node<T> { public: typedef type_store<T> type_store_t; typedef expression_node<T>* expression_ptr; typedef variable_node<T> variable_node_t; typedef vector_node<T> vector_node_t; typedef variable_node_t* variable_node_ptr_t; typedef vector_node_t* vector_node_ptr_t; typedef range_interface<T> range_interface_t; typedef range_data_type<T> range_data_type_t; typedef range_pack<T> range_t; typedef std::pair<expression_ptr,bool> branch_t; typedef std::pair<void*,std::size_t> void_t; typedef std::vector<T> tmp_vs_t; typedef std::vector<type_store_t> typestore_list_t; typedef std::vector<range_data_type_t> range_list_t; generic_function_node(const std::vector<expression_ptr>& arg_list, GenericFunction* func = (GenericFunction*)(0)) : function_(func), arg_list_(arg_list) {} virtual ~generic_function_node() { cleanup_branches::execute(branch_); } virtual bool init_branches() { expr_as_vec1_store_.resize(arg_list_.size(),T(0) ); typestore_list_ .resize(arg_list_.size(),type_store_t() ); range_list_ .resize(arg_list_.size(),range_data_type_t()); branch_ .resize(arg_list_.size(),branch_t((expression_ptr)0,false)); for (std::size_t i = 0; i < arg_list_.size(); ++i) { type_store_t& ts = typestore_list_[i]; if (0 == arg_list_[i]) return false; else if (is_ivector_node(arg_list_[i])) { vector_interface<T>* vi = reinterpret_cast<vector_interface<T>*>(0); if (0 == (vi = dynamic_cast<vector_interface<T>*>(arg_list_[i]))) return false; ts.size = vi->size(); ts.data = vi->vds().data(); ts.type = type_store_t::e_vector; } #ifndef exprtk_disable_string_capabilities else if (is_generally_string_node(arg_list_[i])) { string_base_node<T>* sbn = 
reinterpret_cast<string_base_node<T>*>(0); if (0 == (sbn = dynamic_cast<string_base_node<T>*>(arg_list_[i]))) return false; ts.size = sbn->size(); ts.data = reinterpret_cast<void*>(const_cast<char_ptr>(sbn->base())); ts.type = type_store_t::e_string; range_list_[i].data = ts.data; range_list_[i].size = ts.size; range_list_[i].type_size = sizeof(char); range_list_[i].str_node = sbn; range_interface_t* ri = reinterpret_cast<range_interface_t*>(0); if (0 == (ri = dynamic_cast<range_interface_t*>(arg_list_[i]))) return false; range_t& rp = ri->range_ref(); if ( rp.const_range() && is_const_string_range_node(arg_list_[i]) ) { ts.size = rp.const_size(); ts.data = static_cast<char_ptr>(ts.data) + rp.n0_c.second; range_list_[i].range = reinterpret_cast<range_t*>(0); } else range_list_[i].range = &(ri->range_ref()); } #endif else if (is_variable_node(arg_list_[i])) { variable_node_ptr_t var = variable_node_ptr_t(0); if (0 == (var = dynamic_cast<variable_node_ptr_t>(arg_list_[i]))) return false; ts.size = 1; ts.data = &var->ref(); ts.type = type_store_t::e_scalar; } else { ts.size = 1; ts.data = reinterpret_cast<void*>(&expr_as_vec1_store_[i]); ts.type = type_store_t::e_scalar; } branch_[i] = std::make_pair(arg_list_[i],branch_deletable(arg_list_[i])); } return true; } inline bool operator <(const generic_function_node<T,GenericFunction>& fn) const { return this < (&fn); } inline T value() const { if (function_) { if (populate_value_list()) { typedef typename GenericFunction::parameter_list_t parameter_list_t; return (*function_)(parameter_list_t(typestore_list_)); } } return std::numeric_limits<T>::quiet_NaN(); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_genfunction; } protected: inline virtual bool populate_value_list() const { for (std::size_t i = 0; i < branch_.size(); ++i) { expr_as_vec1_store_[i] = branch_[i].first->value(); } for (std::size_t i = 0; i < branch_.size(); ++i) { range_data_type_t& rdt = range_list_[i]; if 
/* NOTE(review): vendored third-party ExprTk code (formatting preserved byte-for-byte;
 * do not reflow — it must stay diffable against upstream).
 * This span contains:
 *  - the tail of generic_function_node::populate_value_list(): for each ranged argument
 *    it evaluates the range pack, stores the cached size into the type_store entry and
 *    points ts.data at the range start (string base for e_string, raw data otherwise);
 *    returns false if any range evaluation fails.
 *  - string_function_node: calls a StringFunction, writes the result into ret_string_
 *    and updates its range cache upper bound to ret_string_.size() - 1 after each call.
 *  - multimode_genfunction_node / multimode_strfunction_node: same call paths but the
 *    functor is additionally dispatched with a stored param_seq_index_.
 *  - return-statement machinery: return_exception, null_igenfunc (NaN-returning stub),
 *    and return_node, which assigns the populated value list into a results_context
 *    and then throws return_exception to unwind to the enclosing envelope.
 * NOTE(review): string_function_node::value() sets range_.n1_c.second to size()-1,
 * which underflows for an empty result string — matches upstream; confirm against
 * the upstream ExprTk release before changing. */
(rdt.range) { range_t& rp = (*rdt.range); std::size_t r0 = 0; std::size_t r1 = 0; if (rp(r0,r1,rdt.size)) { type_store_t& ts = typestore_list_[i]; ts.size = rp.cache_size(); #ifndef exprtk_disable_string_capabilities if (ts.type == type_store_t::e_string) ts.data = const_cast<char_ptr>(rdt.str_node->base()) + rp.cache.first; else #endif ts.data = static_cast<char_ptr>(rdt.data) + (rp.cache.first * rdt.type_size); } else return false; } } return true; } GenericFunction* function_; mutable typestore_list_t typestore_list_; private: std::vector<expression_ptr> arg_list_; std::vector<branch_t> branch_; mutable tmp_vs_t expr_as_vec1_store_; mutable range_list_t range_list_; }; #ifndef exprtk_disable_string_capabilities template <typename T, typename StringFunction> class string_function_node : public generic_function_node<T,StringFunction>, public string_base_node<T>, public range_interface <T> { public: typedef generic_function_node<T,StringFunction> gen_function_t; typedef range_pack<T> range_t; string_function_node(StringFunction* func, const std::vector<typename gen_function_t::expression_ptr>& arg_list) : gen_function_t(arg_list,func) { range_.n0_c = std::make_pair<bool,std::size_t>(true,0); range_.n1_c = std::make_pair<bool,std::size_t>(true,0); range_.cache.first = range_.n0_c.second; range_.cache.second = range_.n1_c.second; } inline bool operator <(const string_function_node<T,StringFunction>& fn) const { return this < (&fn); } inline T value() const { T result = std::numeric_limits<T>::quiet_NaN(); if (gen_function_t::function_) { if (gen_function_t::populate_value_list()) { typedef typename StringFunction::parameter_list_t parameter_list_t; result = (*gen_function_t::function_)(ret_string_, parameter_list_t(gen_function_t::typestore_list_)); range_.n1_c.second = ret_string_.size() - 1; range_.cache.second = range_.n1_c.second; return result; } } return result; } inline typename expression_node<T>::node_type type() const { return 
expression_node<T>::e_strfunction; } std::string str() const { return ret_string_; } char_cptr base() const { return &ret_string_[0]; } std::size_t size() const { return ret_string_.size(); } range_t& range_ref() { return range_; } const range_t& range_ref() const { return range_; } protected: mutable range_t range_; mutable std::string ret_string_; }; #endif template <typename T, typename GenericFunction> class multimode_genfunction_node : public generic_function_node<T,GenericFunction> { public: typedef generic_function_node<T,GenericFunction> gen_function_t; typedef range_pack<T> range_t; multimode_genfunction_node(GenericFunction* func, const std::size_t& param_seq_index, const std::vector<typename gen_function_t::expression_ptr>& arg_list) : gen_function_t(arg_list,func), param_seq_index_(param_seq_index) {} inline T value() const { T result = std::numeric_limits<T>::quiet_NaN(); if (gen_function_t::function_) { if (gen_function_t::populate_value_list()) { typedef typename GenericFunction::parameter_list_t parameter_list_t; return (*gen_function_t::function_)(param_seq_index_, parameter_list_t(gen_function_t::typestore_list_)); } } return result; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_genfunction; } private: std::size_t param_seq_index_; }; #ifndef exprtk_disable_string_capabilities template <typename T, typename StringFunction> class multimode_strfunction_node : public string_function_node<T,StringFunction> { public: typedef string_function_node<T,StringFunction> str_function_t; typedef range_pack<T> range_t; multimode_strfunction_node(StringFunction* func, const std::size_t& param_seq_index, const std::vector<typename str_function_t::expression_ptr>& arg_list) : str_function_t(func,arg_list), param_seq_index_(param_seq_index) {} inline T value() const { T result = std::numeric_limits<T>::quiet_NaN(); if (str_function_t::function_) { if (str_function_t::populate_value_list()) { typedef typename 
StringFunction::parameter_list_t parameter_list_t; result = (*str_function_t::function_)(param_seq_index_, str_function_t::ret_string_, parameter_list_t(str_function_t::typestore_list_)); str_function_t::range_.n1_c.second = str_function_t::ret_string_.size() - 1; str_function_t::range_.cache.second = str_function_t::range_.n1_c.second; return result; } } return result; } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_strfunction; } private: const std::size_t param_seq_index_; }; #endif class return_exception {}; template <typename T> class null_igenfunc { public: virtual ~null_igenfunc() {} typedef type_store<T> generic_type; typedef typename generic_type::parameter_list parameter_list_t; inline virtual T operator() (parameter_list_t) { return std::numeric_limits<T>::quiet_NaN(); } }; #ifndef exprtk_disable_return_statement template <typename T> class return_node : public generic_function_node<T,null_igenfunc<T> > { public: typedef null_igenfunc<T> igeneric_function_t; typedef igeneric_function_t* igeneric_function_ptr; typedef generic_function_node<T,igeneric_function_t> gen_function_t; typedef results_context<T> results_context_t; return_node(const std::vector<typename gen_function_t::expression_ptr>& arg_list, results_context_t& rc) : gen_function_t (arg_list), results_context_(&rc) {} inline T value() const { if ( (0 != results_context_) && gen_function_t::populate_value_list() ) { typedef typename type_store<T>::parameter_list parameter_list_t; results_context_-> assign(parameter_list_t(gen_function_t::typestore_list_)); throw return_exception(); } return std::numeric_limits<T>::quiet_NaN(); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_return; } private: results_context_t* results_context_; }; template <typename T> class return_envelope_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; typedef results_context<T> results_context_t; 
/* NOTE(review): vendored third-party ExprTk code (kept byte-identical to upstream).
 * This span contains:
 *  - the body of return_envelope_node: wraps an expression body, clears the shared
 *    results_context before each evaluation, and catches return_exception thrown by a
 *    nested return_node — setting return_invoked_ and yielding NaN when a 'return'
 *    fired. It owns/destroys the body branch when branch_deletable() says so.
 *  - the exprtk_define_unary_op(OpName) macro, which stamps out one functor struct per
 *    unary operation (process() forwards to numeric::OpName, plus node type() and
 *    operator_type operation() tags), followed by its instantiations from abs through
 *    ncdf (the remainder follow on the next source line) and the matching #undef.
 *  - opr_base: common typedef bundle (Type/RefType and the n-ary functor typedefs)
 *    shared by all binary operator functors below.
 *  - add_op and mul_op binary functors (2- and 3-operand process(), compound assign()).
 * CAUTION: the #define blocks rely on backslash line continuations tied to the file's
 * physical line breaks — never reflow or re-wrap this region. */
return_envelope_node(expression_ptr body, results_context_t& rc) : results_context_(&rc ), return_invoked_ (false), body_ (body ), body_deletable_ (branch_deletable(body_)) {} ~return_envelope_node() { if (body_ && body_deletable_) { destroy_node(body_); } } inline T value() const { try { return_invoked_ = false; results_context_->clear(); return body_->value(); } catch(const return_exception&) { return_invoked_ = true; return std::numeric_limits<T>::quiet_NaN(); } } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_retenv; } inline bool* retinvk_ptr() { return &return_invoked_; } private: results_context_t* results_context_; mutable bool return_invoked_; expression_ptr body_; const bool body_deletable_; }; #endif #define exprtk_define_unary_op(OpName) \ template <typename T> \ struct OpName##_op \ { \ typedef typename functor_t<T>::Type Type; \ typedef typename expression_node<T>::node_type node_t; \ \ static inline T process(Type v) \ { \ return numeric:: OpName (v); \ } \ \ static inline node_t type() \ { \ return expression_node<T>::e_##OpName; \ } \ \ static inline details::operator_type operation() \ { \ return details::e_##OpName; \ } \ }; \ exprtk_define_unary_op(abs ) exprtk_define_unary_op(acos ) exprtk_define_unary_op(acosh) exprtk_define_unary_op(asin ) exprtk_define_unary_op(asinh) exprtk_define_unary_op(atan ) exprtk_define_unary_op(atanh) exprtk_define_unary_op(ceil ) exprtk_define_unary_op(cos ) exprtk_define_unary_op(cosh ) exprtk_define_unary_op(cot ) exprtk_define_unary_op(csc ) exprtk_define_unary_op(d2g ) exprtk_define_unary_op(d2r ) exprtk_define_unary_op(erf ) exprtk_define_unary_op(erfc ) exprtk_define_unary_op(exp ) exprtk_define_unary_op(expm1) exprtk_define_unary_op(floor) exprtk_define_unary_op(frac ) exprtk_define_unary_op(g2d ) exprtk_define_unary_op(log ) exprtk_define_unary_op(log10) exprtk_define_unary_op(log2 ) exprtk_define_unary_op(log1p) exprtk_define_unary_op(ncdf ) 
exprtk_define_unary_op(neg ) exprtk_define_unary_op(notl ) exprtk_define_unary_op(pos ) exprtk_define_unary_op(r2d ) exprtk_define_unary_op(round) exprtk_define_unary_op(sec ) exprtk_define_unary_op(sgn ) exprtk_define_unary_op(sin ) exprtk_define_unary_op(sinc ) exprtk_define_unary_op(sinh ) exprtk_define_unary_op(sqrt ) exprtk_define_unary_op(tan ) exprtk_define_unary_op(tanh ) exprtk_define_unary_op(trunc) #undef exprtk_define_unary_op template <typename T> struct opr_base { typedef typename details::functor_t<T>::Type Type; typedef typename details::functor_t<T>::RefType RefType; typedef typename details::functor_t<T> functor_t; typedef typename functor_t::qfunc_t quaternary_functor_t; typedef typename functor_t::tfunc_t trinary_functor_t; typedef typename functor_t::bfunc_t binary_functor_t; typedef typename functor_t::ufunc_t unary_functor_t; }; template <typename T> struct add_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; typedef typename opr_base<T>::RefType RefType; static inline T process(Type t1, Type t2) { return t1 + t2; } static inline T process(Type t1, Type t2, Type t3) { return t1 + t2 + t3; } static inline void assign(RefType t1, Type t2) { t1 += t2; } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_add; } static inline details::operator_type operation() { return details::e_add; } }; template <typename T> struct mul_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; typedef typename opr_base<T>::RefType RefType; static inline T process(Type t1, Type t2) { return t1 * t2; } static inline T process(Type t1, Type t2, Type t3) { return t1 * t2 * t3; } static inline void assign(RefType t1, Type t2) { t1 *= t2; } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_mul; } static inline details::operator_type operation() { return details::e_mul; } }; template <typename T> struct sub_op : public opr_base<T> { typedef typename 
/* NOTE(review): vendored third-party ExprTk code (kept byte-identical to upstream).
 * Binary operator functors used by the expression compiler. Each exposes process()
 * (the operation itself), a node_type tag type(), and an operator_type tag operation():
 *  - sub_op (tail), div_op, mod_op (numeric::modulus), pow_op (numeric::pow); the
 *    arithmetic ones also provide a 3-operand process() and a compound assign().
 *  - comparison functors lt/lte/gt/gte returning T(1)/T(0), each with a std::string
 *    overload so the same functor serves numeric and string comparisons.
 *  - eq_op uses std::equal_to<T> (exact equality), whereas equal_op below it uses
 *    numeric::equal (the library's tolerant float comparison); note both report
 *    node_type e_eq and are distinguished only by operation() (e_eq vs e_equal).
 *  - ne_op (std::not_equal_to) and and_op (logical AND via details::is_true). */
opr_base<T>::Type Type; typedef typename opr_base<T>::RefType RefType; static inline T process(Type t1, Type t2) { return t1 - t2; } static inline T process(Type t1, Type t2, Type t3) { return t1 - t2 - t3; } static inline void assign(RefType t1, Type t2) { t1 -= t2; } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_sub; } static inline details::operator_type operation() { return details::e_sub; } }; template <typename T> struct div_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; typedef typename opr_base<T>::RefType RefType; static inline T process(Type t1, Type t2) { return t1 / t2; } static inline T process(Type t1, Type t2, Type t3) { return t1 / t2 / t3; } static inline void assign(RefType t1, Type t2) { t1 /= t2; } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_div; } static inline details::operator_type operation() { return details::e_div; } }; template <typename T> struct mod_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; typedef typename opr_base<T>::RefType RefType; static inline T process(Type t1, Type t2) { return numeric::modulus<T>(t1,t2); } static inline void assign(RefType t1, Type t2) { t1 = numeric::modulus<T>(t1,t2); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_mod; } static inline details::operator_type operation() { return details::e_mod; } }; template <typename T> struct pow_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; typedef typename opr_base<T>::RefType RefType; static inline T process(Type t1, Type t2) { return numeric::pow<T>(t1,t2); } static inline void assign(RefType t1, Type t2) { t1 = numeric::pow<T>(t1,t2); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_pow; } static inline details::operator_type operation() { return details::e_pow; } }; template <typename T> struct lt_op : public opr_base<T> { 
typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return ((t1 < t2) ? T(1) : T(0)); } static inline T process(const std::string& t1, const std::string& t2) { return ((t1 < t2) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_lt; } static inline details::operator_type operation() { return details::e_lt; } }; template <typename T> struct lte_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return ((t1 <= t2) ? T(1) : T(0)); } static inline T process(const std::string& t1, const std::string& t2) { return ((t1 <= t2) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_lte; } static inline details::operator_type operation() { return details::e_lte; } }; template <typename T> struct gt_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return ((t1 > t2) ? T(1) : T(0)); } static inline T process(const std::string& t1, const std::string& t2) { return ((t1 > t2) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_gt; } static inline details::operator_type operation() { return details::e_gt; } }; template <typename T> struct gte_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return ((t1 >= t2) ? T(1) : T(0)); } static inline T process(const std::string& t1, const std::string& t2) { return ((t1 >= t2) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_gte; } static inline details::operator_type operation() { return details::e_gte; } }; template <typename T> struct eq_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return (std::equal_to<T>()(t1,t2) ? 
T(1) : T(0)); } static inline T process(const std::string& t1, const std::string& t2) { return ((t1 == t2) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_eq; } static inline details::operator_type operation() { return details::e_eq; } }; template <typename T> struct equal_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return numeric::equal(t1,t2); } static inline T process(const std::string& t1, const std::string& t2) { return ((t1 == t2) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_eq; } static inline details::operator_type operation() { return details::e_equal; } }; template <typename T> struct ne_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return (std::not_equal_to<T>()(t1,t2) ? T(1) : T(0)); } static inline T process(const std::string& t1, const std::string& t2) { return ((t1 != t2) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_ne; } static inline details::operator_type operation() { return details::e_ne; } }; template <typename T> struct and_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return (details::is_true(t1) && details::is_true(t2)) ? T(1) : T(0); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_and; } static inline details::operator_type operation() { return details::e_and; } }; template <typename T> struct nand_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return (details::is_true(t1) && details::is_true(t2)) ? 
/* NOTE(review): vendored third-party ExprTk code (kept byte-identical to upstream).
 * Logical and string-matching operator functors:
 *  - nand_op (tail) / or_op / nor_op: boolean combinations via details::is_true,
 *    returning T(1)/T(0).
 *  - xor_op / xnor_op: delegate to numeric::xor_opr / numeric::xnor_opr.
 *    NOTE(review): both report node_type e_nor from type() (while operation()
 *    correctly returns e_xor / e_xnor) — this looks like a copy-paste from nor_op,
 *    but it matches this vendored upstream snapshot; confirm against upstream
 *    ExprTk before changing, as node-type tags may be relied on elsewhere.
 *  - in_op / like_op / ilike_op: string-only semantics (substring find, wildcard
 *    match, case-insensitive wildcard match); the numeric overloads deliberately
 *    return quiet_NaN because these operators are undefined for numbers.
 *  - inrange_op (start): ternary t0 <= t1 <= t2 test for numbers and strings. */
T(0) : T(1); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_nand; } static inline details::operator_type operation() { return details::e_nand; } }; template <typename T> struct or_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return (details::is_true(t1) || details::is_true(t2)) ? T(1) : T(0); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_or; } static inline details::operator_type operation() { return details::e_or; } }; template <typename T> struct nor_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return (details::is_true(t1) || details::is_true(t2)) ? T(0) : T(1); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_nor; } static inline details::operator_type operation() { return details::e_nor; } }; template <typename T> struct xor_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return numeric::xor_opr<T>(t1,t2); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_nor; } static inline details::operator_type operation() { return details::e_xor; } }; template <typename T> struct xnor_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(Type t1, Type t2) { return numeric::xnor_opr<T>(t1,t2); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_nor; } static inline details::operator_type operation() { return details::e_xnor; } }; template <typename T> struct in_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(const T&, const T&) { return std::numeric_limits<T>::quiet_NaN(); } static inline T process(const std::string& t1, const std::string& t2) { return ((std::string::npos != t2.find(t1)) 
? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_in; } static inline details::operator_type operation() { return details::e_in; } }; template <typename T> struct like_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(const T&, const T&) { return std::numeric_limits<T>::quiet_NaN(); } static inline T process(const std::string& t1, const std::string& t2) { return (details::wc_match(t2,t1) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_like; } static inline details::operator_type operation() { return details::e_like; } }; template <typename T> struct ilike_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(const T&, const T&) { return std::numeric_limits<T>::quiet_NaN(); } static inline T process(const std::string& t1, const std::string& t2) { return (details::wc_imatch(t2,t1) ? T(1) : T(0)); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_ilike; } static inline details::operator_type operation() { return details::e_ilike; } }; template <typename T> struct inrange_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; static inline T process(const T& t0, const T& t1, const T& t2) { return ((t0 <= t1) && (t1 <= t2)) ? T(1) : T(0); } static inline T process(const std::string& t0, const std::string& t1, const std::string& t2) { return ((t0 <= t1) && (t1 <= t2)) ? 
/* NOTE(review): vendored third-party ExprTk code (kept byte-identical to upstream).
 * Variadic reduction functors over a sequence of expression-node pointers. The free
 * value() helpers unify access: value(node*) evaluates the node, value(T*) dereferences
 * a raw scalar. Each functor dispatches on arg_list.size() to hand-unrolled
 * process_1..process_5 fast paths, with a generic loop for larger lists:
 *  - vararg_add_op: sum; empty list yields T(0).
 *  - vararg_mul_op: product; NOTE(review): the empty-list case also returns T(0)
 *    (not the multiplicative identity T(1)) — matches this upstream snapshot;
 *    confirm upstream intent before changing.
 *  - vararg_avg_op: arithmetic mean (sum via vararg_add_op, divided by count).
 *  - vararg_min_op / vararg_max_op: running min/max seeded from the first element,
 *    using std::min/std::max in the unrolled paths; empty list yields T(0). */
T(1) : T(0); } static inline typename expression_node<T>::node_type type() { return expression_node<T>::e_inranges; } static inline details::operator_type operation() { return details::e_inrange; } }; template <typename T> inline T value(details::expression_node<T>* n) { return n->value(); } template <typename T> inline T value(T* t) { return (*t); } template <typename T> struct vararg_add_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; template <typename Type, typename Allocator, template <typename,typename> class Sequence> static inline T process(const Sequence<Type,Allocator>& arg_list) { switch (arg_list.size()) { case 0 : return T(0); case 1 : return process_1(arg_list); case 2 : return process_2(arg_list); case 3 : return process_3(arg_list); case 4 : return process_4(arg_list); case 5 : return process_5(arg_list); default : { T result = T(0); for (std::size_t i = 0; i < arg_list.size(); ++i) { result += value(arg_list[i]); } return result; } } } template <typename Sequence> static inline T process_1(const Sequence& arg_list) { return value(arg_list[0]); } template <typename Sequence> static inline T process_2(const Sequence& arg_list) { return value(arg_list[0]) + value(arg_list[1]); } template <typename Sequence> static inline T process_3(const Sequence& arg_list) { return value(arg_list[0]) + value(arg_list[1]) + value(arg_list[2]) ; } template <typename Sequence> static inline T process_4(const Sequence& arg_list) { return value(arg_list[0]) + value(arg_list[1]) + value(arg_list[2]) + value(arg_list[3]) ; } template <typename Sequence> static inline T process_5(const Sequence& arg_list) { return value(arg_list[0]) + value(arg_list[1]) + value(arg_list[2]) + value(arg_list[3]) + value(arg_list[4]) ; } }; template <typename T> struct vararg_mul_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; template <typename Type, typename Allocator, template <typename,typename> class Sequence> static inline T process(const 
Sequence<Type,Allocator>& arg_list) { switch (arg_list.size()) { case 0 : return T(0); case 1 : return process_1(arg_list); case 2 : return process_2(arg_list); case 3 : return process_3(arg_list); case 4 : return process_4(arg_list); case 5 : return process_5(arg_list); default : { T result = T(value(arg_list[0])); for (std::size_t i = 1; i < arg_list.size(); ++i) { result *= value(arg_list[i]); } return result; } } } template <typename Sequence> static inline T process_1(const Sequence& arg_list) { return value(arg_list[0]); } template <typename Sequence> static inline T process_2(const Sequence& arg_list) { return value(arg_list[0]) * value(arg_list[1]); } template <typename Sequence> static inline T process_3(const Sequence& arg_list) { return value(arg_list[0]) * value(arg_list[1]) * value(arg_list[2]) ; } template <typename Sequence> static inline T process_4(const Sequence& arg_list) { return value(arg_list[0]) * value(arg_list[1]) * value(arg_list[2]) * value(arg_list[3]) ; } template <typename Sequence> static inline T process_5(const Sequence& arg_list) { return value(arg_list[0]) * value(arg_list[1]) * value(arg_list[2]) * value(arg_list[3]) * value(arg_list[4]) ; } }; template <typename T> struct vararg_avg_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; template <typename Type, typename Allocator, template <typename,typename> class Sequence> static inline T process(const Sequence<Type,Allocator>& arg_list) { switch (arg_list.size()) { case 0 : return T(0); case 1 : return process_1(arg_list); case 2 : return process_2(arg_list); case 3 : return process_3(arg_list); case 4 : return process_4(arg_list); case 5 : return process_5(arg_list); default : return vararg_add_op<T>::process(arg_list) / arg_list.size(); } } template <typename Sequence> static inline T process_1(const Sequence& arg_list) { return value(arg_list[0]); } template <typename Sequence> static inline T process_2(const Sequence& arg_list) { return (value(arg_list[0]) + 
value(arg_list[1])) / T(2); } template <typename Sequence> static inline T process_3(const Sequence& arg_list) { return (value(arg_list[0]) + value(arg_list[1]) + value(arg_list[2])) / T(3); } template <typename Sequence> static inline T process_4(const Sequence& arg_list) { return (value(arg_list[0]) + value(arg_list[1]) + value(arg_list[2]) + value(arg_list[3])) / T(4); } template <typename Sequence> static inline T process_5(const Sequence& arg_list) { return (value(arg_list[0]) + value(arg_list[1]) + value(arg_list[2]) + value(arg_list[3]) + value(arg_list[4])) / T(5); } }; template <typename T> struct vararg_min_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; template <typename Type, typename Allocator, template <typename,typename> class Sequence> static inline T process(const Sequence<Type,Allocator>& arg_list) { switch (arg_list.size()) { case 0 : return T(0); case 1 : return process_1(arg_list); case 2 : return process_2(arg_list); case 3 : return process_3(arg_list); case 4 : return process_4(arg_list); case 5 : return process_5(arg_list); default : { T result = T(value(arg_list[0])); for (std::size_t i = 1; i < arg_list.size(); ++i) { const T v = value(arg_list[i]); if (v < result) result = v; } return result; } } } template <typename Sequence> static inline T process_1(const Sequence& arg_list) { return value(arg_list[0]); } template <typename Sequence> static inline T process_2(const Sequence& arg_list) { return std::min<T>(value(arg_list[0]),value(arg_list[1])); } template <typename Sequence> static inline T process_3(const Sequence& arg_list) { return std::min<T>(std::min<T>(value(arg_list[0]),value(arg_list[1])),value(arg_list[2])); } template <typename Sequence> static inline T process_4(const Sequence& arg_list) { return std::min<T>( std::min<T>(value(arg_list[0]),value(arg_list[1])), std::min<T>(value(arg_list[2]),value(arg_list[3]))); } template <typename Sequence> static inline T process_5(const Sequence& arg_list) { return 
std::min<T>( std::min<T>(std::min<T>(value(arg_list[0]),value(arg_list[1])), std::min<T>(value(arg_list[2]),value(arg_list[3]))), value(arg_list[4])); } }; template <typename T> struct vararg_max_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; template <typename Type, typename Allocator, template <typename,typename> class Sequence> static inline T process(const Sequence<Type,Allocator>& arg_list) { switch (arg_list.size()) { case 0 : return T(0); case 1 : return process_1(arg_list); case 2 : return process_2(arg_list); case 3 : return process_3(arg_list); case 4 : return process_4(arg_list); case 5 : return process_5(arg_list); default : { T result = T(value(arg_list[0])); for (std::size_t i = 1; i < arg_list.size(); ++i) { const T v = value(arg_list[i]); if (v > result) result = v; } return result; } } } template <typename Sequence> static inline T process_1(const Sequence& arg_list) { return value(arg_list[0]); } template <typename Sequence> static inline T process_2(const Sequence& arg_list) { return std::max<T>(value(arg_list[0]),value(arg_list[1])); } template <typename Sequence> static inline T process_3(const Sequence& arg_list) { return std::max<T>(std::max<T>(value(arg_list[0]),value(arg_list[1])),value(arg_list[2])); } template <typename Sequence> static inline T process_4(const Sequence& arg_list) { return std::max<T>( std::max<T>(value(arg_list[0]),value(arg_list[1])), std::max<T>(value(arg_list[2]),value(arg_list[3]))); } template <typename Sequence> static inline T process_5(const Sequence& arg_list) { return std::max<T>( std::max<T>(std::max<T>(value(arg_list[0]),value(arg_list[1])), std::max<T>(value(arg_list[2]),value(arg_list[3]))), value(arg_list[4])); } }; template <typename T> struct vararg_mand_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; template <typename Type, typename Allocator, template <typename,typename> class Sequence> static inline T process(const Sequence<Type,Allocator>& arg_list) { switch 
/* NOTE(review): vendored third-party ExprTk code (kept byte-identical to upstream).
 * Variadic logical/sequence functors, same unrolled-dispatch shape as the reductions
 * above (process_1..process_5 fast paths, generic loop otherwise):
 *  - vararg_mand_op: multi-input AND — T(1) only if every argument is non-zero
 *    (tested with std::not_equal_to against T(0)); the loop returns T(0) on the
 *    first zero argument. Note there is no case 0 in the switch.
 *  - vararg_mor_op: multi-input OR — T(1) if any argument is non-zero; the loop
 *    returns T(1) on the first non-zero argument. Also no case 0.
 *  - vararg_multi_op: sequential evaluation (';'-style multi-expression) — evaluates
 *    every argument for its side effects and returns the LAST argument's value;
 *    an empty list yields quiet_NaN. Unrolled through process_8. */
(arg_list.size()) { case 1 : return process_1(arg_list); case 2 : return process_2(arg_list); case 3 : return process_3(arg_list); case 4 : return process_4(arg_list); case 5 : return process_5(arg_list); default : { for (std::size_t i = 0; i < arg_list.size(); ++i) { if (std::equal_to<T>()(T(0),value(arg_list[i]))) return T(0); } return T(1); } } } template <typename Sequence> static inline T process_1(const Sequence& arg_list) { return std::not_equal_to<T>() (T(0),value(arg_list[0])) ? T(1) : T(0); } template <typename Sequence> static inline T process_2(const Sequence& arg_list) { return ( std::not_equal_to<T>()(T(0),value(arg_list[0])) && std::not_equal_to<T>()(T(0),value(arg_list[1])) ) ? T(1) : T(0); } template <typename Sequence> static inline T process_3(const Sequence& arg_list) { return ( std::not_equal_to<T>()(T(0),value(arg_list[0])) && std::not_equal_to<T>()(T(0),value(arg_list[1])) && std::not_equal_to<T>()(T(0),value(arg_list[2])) ) ? T(1) : T(0); } template <typename Sequence> static inline T process_4(const Sequence& arg_list) { return ( std::not_equal_to<T>()(T(0),value(arg_list[0])) && std::not_equal_to<T>()(T(0),value(arg_list[1])) && std::not_equal_to<T>()(T(0),value(arg_list[2])) && std::not_equal_to<T>()(T(0),value(arg_list[3])) ) ? T(1) : T(0); } template <typename Sequence> static inline T process_5(const Sequence& arg_list) { return ( std::not_equal_to<T>()(T(0),value(arg_list[0])) && std::not_equal_to<T>()(T(0),value(arg_list[1])) && std::not_equal_to<T>()(T(0),value(arg_list[2])) && std::not_equal_to<T>()(T(0),value(arg_list[3])) && std::not_equal_to<T>()(T(0),value(arg_list[4])) ) ? 
T(1) : T(0); } }; template <typename T> struct vararg_mor_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; template <typename Type, typename Allocator, template <typename,typename> class Sequence> static inline T process(const Sequence<Type,Allocator>& arg_list) { switch (arg_list.size()) { case 1 : return process_1(arg_list); case 2 : return process_2(arg_list); case 3 : return process_3(arg_list); case 4 : return process_4(arg_list); case 5 : return process_5(arg_list); default : { for (std::size_t i = 0; i < arg_list.size(); ++i) { if (std::not_equal_to<T>()(T(0),value(arg_list[i]))) return T(1); } return T(0); } } } template <typename Sequence> static inline T process_1(const Sequence& arg_list) { return std::not_equal_to<T>() (T(0),value(arg_list[0])) ? T(1) : T(0); } template <typename Sequence> static inline T process_2(const Sequence& arg_list) { return ( std::not_equal_to<T>()(T(0),value(arg_list[0])) || std::not_equal_to<T>()(T(0),value(arg_list[1])) ) ? T(1) : T(0); } template <typename Sequence> static inline T process_3(const Sequence& arg_list) { return ( std::not_equal_to<T>()(T(0),value(arg_list[0])) || std::not_equal_to<T>()(T(0),value(arg_list[1])) || std::not_equal_to<T>()(T(0),value(arg_list[2])) ) ? T(1) : T(0); } template <typename Sequence> static inline T process_4(const Sequence& arg_list) { return ( std::not_equal_to<T>()(T(0),value(arg_list[0])) || std::not_equal_to<T>()(T(0),value(arg_list[1])) || std::not_equal_to<T>()(T(0),value(arg_list[2])) || std::not_equal_to<T>()(T(0),value(arg_list[3])) ) ? T(1) : T(0); } template <typename Sequence> static inline T process_5(const Sequence& arg_list) { return ( std::not_equal_to<T>()(T(0),value(arg_list[0])) || std::not_equal_to<T>()(T(0),value(arg_list[1])) || std::not_equal_to<T>()(T(0),value(arg_list[2])) || std::not_equal_to<T>()(T(0),value(arg_list[3])) || std::not_equal_to<T>()(T(0),value(arg_list[4])) ) ? 
T(1) : T(0); } }; template <typename T> struct vararg_multi_op : public opr_base<T> { typedef typename opr_base<T>::Type Type; template <typename Type, typename Allocator, template <typename,typename> class Sequence> static inline T process(const Sequence<Type,Allocator>& arg_list) { switch (arg_list.size()) { case 0 : return std::numeric_limits<T>::quiet_NaN(); case 1 : return process_1(arg_list); case 2 : return process_2(arg_list); case 3 : return process_3(arg_list); case 4 : return process_4(arg_list); case 5 : return process_5(arg_list); case 6 : return process_6(arg_list); case 7 : return process_7(arg_list); case 8 : return process_8(arg_list); default : { for (std::size_t i = 0; i < (arg_list.size() - 1); ++i) { value(arg_list[i]); } return value(arg_list.back()); } } } template <typename Sequence> static inline T process_1(const Sequence& arg_list) { return value(arg_list[0]); } template <typename Sequence> static inline T process_2(const Sequence& arg_list) { value(arg_list[0]); return value(arg_list[1]); } template <typename Sequence> static inline T process_3(const Sequence& arg_list) { value(arg_list[0]); value(arg_list[1]); return value(arg_list[2]); } template <typename Sequence> static inline T process_4(const Sequence& arg_list) { value(arg_list[0]); value(arg_list[1]); value(arg_list[2]); return value(arg_list[3]); } template <typename Sequence> static inline T process_5(const Sequence& arg_list) { value(arg_list[0]); value(arg_list[1]); value(arg_list[2]); value(arg_list[3]); return value(arg_list[4]); } template <typename Sequence> static inline T process_6(const Sequence& arg_list) { value(arg_list[0]); value(arg_list[1]); value(arg_list[2]); value(arg_list[3]); value(arg_list[4]); return value(arg_list[5]); } template <typename Sequence> static inline T process_7(const Sequence& arg_list) { value(arg_list[0]); value(arg_list[1]); value(arg_list[2]); value(arg_list[3]); value(arg_list[4]); value(arg_list[5]); return value(arg_list[6]); } 
template <typename Sequence> static inline T process_8(const Sequence& arg_list) { value(arg_list[0]); value(arg_list[1]); value(arg_list[2]); value(arg_list[3]); value(arg_list[4]); value(arg_list[5]); value(arg_list[6]); return value(arg_list[7]); } }; template <typename T> struct vec_add_op { typedef vector_interface<T>* ivector_ptr; static inline T process(const ivector_ptr v) { const T* vec = v->vec()->vds().data(); const std::size_t vec_size = v->vec()->vds().size(); loop_unroll::details lud(vec_size); if (vec_size <= static_cast<std::size_t>(lud.batch_size)) { T result = T(0); int i = 0; exprtk_disable_fallthrough_begin switch (vec_size) { #define case_stmt(N) \ case N : result += vec[i++]; \ #ifndef exprtk_disable_superscalar_unroll case_stmt(16) case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) #endif case_stmt( 4) case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef case_stmt return result; } T r[] = { T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0), T(0) }; const T* upper_bound = vec + lud.upper_bound; while (vec < upper_bound) { #define exprtk_loop(N) \ r[N] += vec[N]; \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec += lud.batch_size; } int i = 0; exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : r[0] += vec[i++]; \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } 
exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return (r[ 0] + r[ 1] + r[ 2] + r[ 3]) #ifndef exprtk_disable_superscalar_unroll + (r[ 4] + r[ 5] + r[ 6] + r[ 7]) + (r[ 8] + r[ 9] + r[10] + r[11]) + (r[12] + r[13] + r[14] + r[15]) #endif ; } }; template <typename T> struct vec_mul_op { typedef vector_interface<T>* ivector_ptr; static inline T process(const ivector_ptr v) { const T* vec = v->vec()->vds().data(); const std::size_t vec_size = v->vec()->vds().size(); loop_unroll::details lud(vec_size); if (vec_size <= static_cast<std::size_t>(lud.batch_size)) { T result = T(1); int i = 0; exprtk_disable_fallthrough_begin switch (vec_size) { #define case_stmt(N) \ case N : result *= vec[i++]; \ #ifndef exprtk_disable_superscalar_unroll case_stmt(16) case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) #endif case_stmt( 4) case_stmt( 3) case_stmt( 2) case_stmt( 1) } exprtk_disable_fallthrough_end #undef case_stmt return result; } T r[] = { T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1), T(1) }; const T* upper_bound = vec + lud.upper_bound; while (vec < upper_bound) { #define exprtk_loop(N) \ r[N] *= vec[N]; \ exprtk_loop( 0) exprtk_loop( 1) exprtk_loop( 2) exprtk_loop( 3) #ifndef exprtk_disable_superscalar_unroll exprtk_loop( 4) exprtk_loop( 5) exprtk_loop( 6) exprtk_loop( 7) exprtk_loop( 8) exprtk_loop( 9) exprtk_loop(10) exprtk_loop(11) exprtk_loop(12) exprtk_loop(13) exprtk_loop(14) exprtk_loop(15) #endif vec += lud.batch_size; } int i = 0; exprtk_disable_fallthrough_begin switch (lud.remainder) { #define case_stmt(N) \ case N : r[0] *= vec[i++]; \ #ifndef exprtk_disable_superscalar_unroll case_stmt(15) case_stmt(14) case_stmt(13) case_stmt(12) case_stmt(11) case_stmt(10) case_stmt( 9) case_stmt( 8) case_stmt( 7) case_stmt( 6) case_stmt( 5) case_stmt( 4) #endif case_stmt( 3) case_stmt( 2) case_stmt( 1) } 
exprtk_disable_fallthrough_end #undef exprtk_loop #undef case_stmt return (r[ 0] * r[ 1] * r[ 2] * r[ 3]) #ifndef exprtk_disable_superscalar_unroll + (r[ 4] * r[ 5] * r[ 6] * r[ 7]) + (r[ 8] * r[ 9] * r[10] * r[11]) + (r[12] * r[13] * r[14] * r[15]) #endif ; } }; template <typename T> struct vec_avg_op { typedef vector_interface<T>* ivector_ptr; static inline T process(const ivector_ptr v) { const std::size_t vec_size = v->vec()->vds().size(); return vec_add_op<T>::process(v) / vec_size; } }; template <typename T> struct vec_min_op { typedef vector_interface<T>* ivector_ptr; static inline T process(const ivector_ptr v) { const T* vec = v->vec()->vds().data(); const std::size_t vec_size = v->vec()->vds().size(); T result = vec[0]; for (std::size_t i = 1; i < vec_size; ++i) { T v_i = vec[i]; if (v_i < result) result = v_i; } return result; } }; template <typename T> struct vec_max_op { typedef vector_interface<T>* ivector_ptr; static inline T process(const ivector_ptr v) { const T* vec = v->vec()->vds().data(); const std::size_t vec_size = v->vec()->vds().size(); T result = vec[0]; for (std::size_t i = 1; i < vec_size; ++i) { T v_i = vec[i]; if (v_i > result) result = v_i; } return result; } }; template <typename T> class vov_base_node : public expression_node<T> { public: virtual ~vov_base_node() {} inline virtual operator_type operation() const { return details::e_default; } virtual const T& v0() const = 0; virtual const T& v1() const = 0; }; template <typename T> class cov_base_node : public expression_node<T> { public: virtual ~cov_base_node() {} inline virtual operator_type operation() const { return details::e_default; } virtual const T c() const = 0; virtual const T& v() const = 0; }; template <typename T> class voc_base_node : public expression_node<T> { public: virtual ~voc_base_node() {} inline virtual operator_type operation() const { return details::e_default; } virtual const T c() const = 0; virtual const T& v() const = 0; }; template <typename T> class 
vob_base_node : public expression_node<T> { public: virtual ~vob_base_node() {} virtual const T& v() const = 0; }; template <typename T> class bov_base_node : public expression_node<T> { public: virtual ~bov_base_node() {} virtual const T& v() const = 0; }; template <typename T> class cob_base_node : public expression_node<T> { public: virtual ~cob_base_node() {} inline virtual operator_type operation() const { return details::e_default; } virtual const T c() const = 0; virtual void set_c(const T) = 0; virtual expression_node<T>* move_branch(const std::size_t& index) = 0; }; template <typename T> class boc_base_node : public expression_node<T> { public: virtual ~boc_base_node() {} inline virtual operator_type operation() const { return details::e_default; } virtual const T c() const = 0; virtual void set_c(const T) = 0; virtual expression_node<T>* move_branch(const std::size_t& index) = 0; }; template <typename T> class uv_base_node : public expression_node<T> { public: virtual ~uv_base_node() {} inline virtual operator_type operation() const { return details::e_default; } virtual const T& v() const = 0; }; template <typename T> class sos_base_node : public expression_node<T> { public: virtual ~sos_base_node() {} inline virtual operator_type operation() const { return details::e_default; } }; template <typename T> class sosos_base_node : public expression_node<T> { public: virtual ~sosos_base_node() {} inline virtual operator_type operation() const { return details::e_default; } }; template <typename T> class T0oT1oT2_base_node : public expression_node<T> { public: virtual ~T0oT1oT2_base_node() {} virtual std::string type_id() const = 0; }; template <typename T> class T0oT1oT2oT3_base_node : public expression_node<T> { public: virtual ~T0oT1oT2oT3_base_node() {} virtual std::string type_id() const = 0; }; template <typename T, typename Operation> class unary_variable_node : public uv_base_node<T> { public: typedef expression_node<T>* expression_ptr; typedef 
Operation operation_t; explicit unary_variable_node(const T& var) : v_(var) {} inline T value() const { return Operation::process(v_); } inline typename expression_node<T>::node_type type() const { return Operation::type(); } inline operator_type operation() const { return Operation::operation(); } inline const T& v() const { return v_; } private: unary_variable_node(unary_variable_node<T,Operation>&); unary_variable_node<T,Operation>& operator=(unary_variable_node<T,Operation>&); const T& v_; }; template <typename T> class uvouv_node : public expression_node<T> { public: // UOpr1(v0) Op UOpr2(v1) typedef expression_node<T>* expression_ptr; typedef typename details::functor_t<T> functor_t; typedef typename functor_t::bfunc_t bfunc_t; typedef typename functor_t::ufunc_t ufunc_t; explicit uvouv_node(const T& var0,const T& var1, ufunc_t uf0, ufunc_t uf1, bfunc_t bf) : v0_(var0), v1_(var1), u0_(uf0 ), u1_(uf1 ), f_ (bf ) {} inline T value() const { return f_(u0_(v0_),u1_(v1_)); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_uvouv; } inline operator_type operation() const { return details::e_default; } inline const T& v0() { return v0_; } inline const T& v1() { return v1_; } inline ufunc_t u0() { return u0_; } inline ufunc_t u1() { return u1_; } inline ufunc_t f() { return f_; } private: uvouv_node(uvouv_node<T>&); uvouv_node<T>& operator=(uvouv_node<T>&); const T& v0_; const T& v1_; const ufunc_t u0_; const ufunc_t u1_; const bfunc_t f_; }; template <typename T, typename Operation> class unary_branch_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; typedef Operation operation_t; explicit unary_branch_node(expression_ptr brnch) : branch_(brnch), branch_deletable_(branch_deletable(branch_)) {} ~unary_branch_node() { if (branch_ && branch_deletable_) { destroy_node(branch_); } } inline T value() const { return Operation::process(branch_->value()); } inline typename 
expression_node<T>::node_type type() const
{
   return Operation::type();
}

inline operator_type operation() const
{
   return Operation::operation();
}

inline expression_node<T>* branch(const std::size_t&) const
{
   return branch_;
}

// Relinquish ownership: the branch will no longer be deleted by the dtor.
inline void release()
{
   branch_deletable_ = false;
}

private:

unary_branch_node(unary_branch_node<T,Operation>&);
unary_branch_node<T,Operation>& operator=(unary_branch_node<T,Operation>&);

expression_ptr branch_;
bool           branch_deletable_;
};

// Compile-time traits used below to classify operand parameter types
// (constant vs referenced variable) when synthesising composite nodes.
template <typename T> struct is_const                { enum {result = 0}; };
template <typename T> struct is_const <const T>      { enum {result = 1}; };
template <typename T> struct is_const_ref            { enum {result = 0}; };
template <typename T> struct is_const_ref <const T&> { enum {result = 1}; };
template <typename T> struct is_ref                  { enum {result = 0}; };
template <typename T> struct is_ref<T&>              { enum {result = 1}; };
template <typename T> struct is_ref<const T&>        { enum {result = 0}; };

// Maps an operand kind to its id letter: "v" (variable) or "c" (constant).
template <std::size_t State>
struct param_to_str
{
   static std::string result()
   {
      static const std::string r("v");
      return r;
   }
};

template <>
struct param_to_str<0>
{
   static std::string result()
   {
      static const std::string r("c");
      return r;
   }
};

#define exprtk_crtype(Type) \
param_to_str<is_const_ref< Type >::result>::result() \

// Evaluation-order policies for three-operand expressions T0 o0 T1 o1 T2;
// id() produces a string key describing operand kinds and grouping.
template <typename T>
struct T0oT1oT2process
{
   typedef typename details::functor_t<T> functor_t;
   typedef typename functor_t::bfunc_t    bfunc_t;

   struct mode0
   {
      static inline T process(const T& t0, const T& t1, const T& t2,
                              const bfunc_t bf0, const bfunc_t bf1)
      {
         // (T0 o0 T1) o1 T2
         return bf1(bf0(t0,t1),t2);
      }

      template <typename T0, typename T1, typename T2>
      static inline std::string id()
      {
         static const std::string result = "(" + exprtk_crtype(T0) + "o"   +
                                                 exprtk_crtype(T1) + ")o(" +
                                                 exprtk_crtype(T2) + ")"   ;
         return result;
      }
   };

   struct mode1
   {
      static inline T process(const T& t0, const T& t1, const T& t2,
                              const bfunc_t bf0, const bfunc_t bf1)
      {
         // T0 o0 (T1 o1 T2)
         return bf0(t0,bf1(t1,t2));
      }

      template <typename T0, typename T1, typename T2>
      static inline std::string id()
      {
         static const std::string result = "(" + exprtk_crtype(T0) + ")o(" +
                                                 exprtk_crtype(T1) + "o"   +
                                                 exprtk_crtype(T2) + ")"   ;
         return result;
      }
   };
};

// Evaluation-order policies for four-operand expressions
// T0 o0 T1 o1 T2 o2 T3 (note: the '0' in the name is in the original).
template <typename T>
struct T0oT1oT20T3process
{
   typedef typename details::functor_t<T> functor_t;
   typedef typename functor_t::bfunc_t    bfunc_t;

   struct mode0
   {
      static inline T process(const T& t0, const T& t1,
                              const T& t2, const T& t3,
                              const bfunc_t bf0, const bfunc_t bf1, const bfunc_t bf2)
      {
         // (T0 o0 T1) o1 (T2 o2 T3)
         return bf1(bf0(t0,t1),bf2(t2,t3));
      }

      template <typename T0, typename T1, typename T2, typename T3>
      static inline std::string id()
      {
         static const std::string result = "(" + exprtk_crtype(T0) + "o"  +
                                                 exprtk_crtype(T1) + ")o" +
                                           "(" + exprtk_crtype(T2) + "o"  +
                                                 exprtk_crtype(T3) + ")"  ;
         return result;
      }
   };

   struct mode1
   {
      static inline T process(const T& t0, const T& t1,
                              const T& t2, const T& t3,
                              const bfunc_t bf0, const bfunc_t bf1, const bfunc_t bf2)
      {
         // (T0 o0 (T1 o1 (T2 o2 T3))
         return bf0(t0,bf1(t1,bf2(t2,t3)));
      }

      template <typename T0, typename T1, typename T2, typename T3>
      static inline std::string id()
      {
         static const std::string result = "("  + exprtk_crtype(T0) + ")o((" +
                                                  exprtk_crtype(T1) + ")o("  +
                                                  exprtk_crtype(T2) + "o"    +
                                                  exprtk_crtype(T3) + "))"   ;
         return result;
      }
   };

   struct mode2
   {
      static inline T process(const T& t0, const T& t1,
                              const T& t2, const T& t3,
                              const bfunc_t bf0, const bfunc_t bf1, const bfunc_t bf2)
      {
         // (T0 o0 ((T1 o1 T2) o2 T3)
         return bf0(t0,bf2(bf1(t1,t2),t3));
      }

      template <typename T0, typename T1, typename T2, typename T3>
      static inline std::string id()
      {
         static const std::string result = "("  + exprtk_crtype(T0) + ")o((" +
                                                  exprtk_crtype(T1) + "o"    +
                                                  exprtk_crtype(T2) + ")o("  +
                                                  exprtk_crtype(T3) + "))"   ;
         return result;
      }
   };

   struct mode3
   {
      static inline T process(const T& t0, const T& t1,
                              const T& t2, const T& t3,
                              const bfunc_t bf0, const bfunc_t bf1, const bfunc_t bf2)
      {
         // (((T0 o0 T1) o1 T2) o2 T3)
         return bf2(bf1(bf0(t0,t1),t2),t3);
      }

      template <typename T0, typename T1, typename T2, typename T3>
      static inline std::string id()
      {
         static const std::string result = "((" + exprtk_crtype(T0) + "o"    +
                                                  exprtk_crtype(T1) + ")o("  +
                                                  exprtk_crtype(T2) + "))o(" +
                                                  exprtk_crtype(T3) + ")";
         return result;
      }
   };

   struct mode4
   {
      static inline T process(const T& t0, const T& t1,
                              const T& t2, const T& t3,
                              const bfunc_t bf0, const bfunc_t bf1, const bfunc_t bf2)
      {
         // ((T0 o0 (T1 o1 T2)) o2 T3
         return bf2(bf0(t0,bf1(t1,t2)),t3);
      }

      template <typename T0, typename T1, typename T2, typename T3>
      static inline std::string id()
      {
         static const std::string result = "((" + exprtk_crtype(T0) + ")o(" +
                                                  exprtk_crtype(T1) + "o"   +
                                                  exprtk_crtype(T2) + "))o(" +
                                                  exprtk_crtype(T3) + ")"   ;
         return result;
      }
   };
};

#undef exprtk_crtype

// Compile-time mapping from a (T0,T1) operand-kind pair to the node_type
// enumeration value; specialised below via synthesis_node_type_define.
template <typename T, typename T0, typename T1>
struct nodetype_T0oT1
{
   static const typename expression_node<T>::node_type result;
};

template <typename T, typename T0, typename T1>
const typename expression_node<T>::node_type nodetype_T0oT1<T,T0,T1>::result = expression_node<T>::e_none;

#define synthesis_node_type_define(T0_,T1_,v_)                                                            \
template <typename T, typename T0, typename T1>                                                           \
struct nodetype_T0oT1<T,T0_,T1_> { static const typename expression_node<T>::node_type result; };         \
template <typename T, typename T0, typename T1>                                                           \
const typename expression_node<T>::node_type nodetype_T0oT1<T,T0_,T1_>::result = expression_node<T>:: v_; \

synthesis_node_type_define(const T0&,const T1&, e_vov)
synthesis_node_type_define(const T0&,const T1 , e_voc)
synthesis_node_type_define(const T0 ,const T1&, e_cov)
synthesis_node_type_define(      T0&,      T1&,e_none)
synthesis_node_type_define(const T0 ,const T1 ,e_none)
synthesis_node_type_define(      T0&,const T1 ,e_none)
synthesis_node_type_define(const T0 ,      T1&,e_none)
synthesis_node_type_define(const T0&,      T1&,e_none)
synthesis_node_type_define(      T0&,const T1&,e_none)
#undef synthesis_node_type_define

template <typename T, typename T0, typename T1, typename T2>
struct
nodetype_T0oT1oT2
{
   // Maps a (T0,T1,T2) operand-kind triple to a node_type value;
   // specialised below, defaulting to e_none.
   static const typename expression_node<T>::node_type result;
};

template <typename T, typename T0, typename T1, typename T2>
const typename expression_node<T>::node_type nodetype_T0oT1oT2<T,T0,T1,T2>::result = expression_node<T>::e_none;

#define synthesis_node_type_define(T0_,T1_,T2_,v_)                                                                \
template <typename T, typename T0, typename T1, typename T2>                                                      \
struct nodetype_T0oT1oT2<T,T0_,T1_,T2_> { static const typename expression_node<T>::node_type result; };          \
template <typename T, typename T0, typename T1, typename T2>                                                      \
const typename expression_node<T>::node_type nodetype_T0oT1oT2<T,T0_,T1_,T2_>::result = expression_node<T>:: v_; \

synthesis_node_type_define(const T0&,const T1&,const T2&, e_vovov)
synthesis_node_type_define(const T0&,const T1&,const T2 , e_vovoc)
synthesis_node_type_define(const T0&,const T1 ,const T2&, e_vocov)
synthesis_node_type_define(const T0 ,const T1&,const T2&, e_covov)
synthesis_node_type_define(const T0 ,const T1&,const T2 , e_covoc)
synthesis_node_type_define(const T0 ,const T1 ,const T2 , e_none )
synthesis_node_type_define(const T0 ,const T1 ,const T2&, e_none )
synthesis_node_type_define(const T0&,const T1 ,const T2 , e_none )
synthesis_node_type_define(      T0&,      T1&,      T2&, e_none )
#undef synthesis_node_type_define

// Maps a (T0,T1,T2,T3) operand-kind quadruple to a node_type value.
template <typename T, typename T0, typename T1, typename T2, typename T3>
struct nodetype_T0oT1oT2oT3
{
   static const typename expression_node<T>::node_type result;
};

template <typename T, typename T0, typename T1, typename T2, typename T3>
const typename expression_node<T>::node_type nodetype_T0oT1oT2oT3<T,T0,T1,T2,T3>::result = expression_node<T>::e_none;

#define synthesis_node_type_define(T0_,T1_,T2_,T3_,v_)                                                                    \
template <typename T, typename T0, typename T1, typename T2, typename T3>                                                 \
struct nodetype_T0oT1oT2oT3<T,T0_,T1_,T2_,T3_> { static const typename expression_node<T>::node_type result; };           \
template <typename T, typename T0, typename T1, typename T2, typename T3>                                                 \
const typename expression_node<T>::node_type nodetype_T0oT1oT2oT3<T,T0_,T1_,T2_,T3_>::result = expression_node<T>:: v_; \

synthesis_node_type_define(const T0&,const T1&,const T2&, const T3&,e_vovovov)
synthesis_node_type_define(const T0&,const T1&,const T2&, const T3 ,e_vovovoc)
synthesis_node_type_define(const T0&,const T1&,const T2 , const T3&,e_vovocov)
synthesis_node_type_define(const T0&,const T1 ,const T2&, const T3&,e_vocovov)
synthesis_node_type_define(const T0 ,const T1&,const T2&, const T3&,e_covovov)
synthesis_node_type_define(const T0 ,const T1&,const T2 , const T3&,e_covocov)
synthesis_node_type_define(const T0&,const T1 ,const T2&, const T3 ,e_vocovoc)
synthesis_node_type_define(const T0 ,const T1&,const T2&, const T3 ,e_covovoc)
synthesis_node_type_define(const T0&,const T1 ,const T2 , const T3&,e_vococov)
synthesis_node_type_define(const T0 ,const T1 ,const T2 , const T3 ,e_none )
synthesis_node_type_define(const T0 ,const T1 ,const T2 , const T3&,e_none )
synthesis_node_type_define(const T0 ,const T1 ,const T2&, const T3 ,e_none )
synthesis_node_type_define(const T0 ,const T1&,const T2 , const T3 ,e_none )
synthesis_node_type_define(const T0&,const T1 ,const T2 , const T3 ,e_none )
synthesis_node_type_define(const T0 ,const T1 ,const T2&, const T3&,e_none )
synthesis_node_type_define(const T0&,const T1&,const T2 , const T3 ,e_none )
#undef synthesis_node_type_define

// Two-operand composite node: evaluates f_(t0_,t1_); operand kinds are
// encoded in the T0/T1 template parameters (value = constant, const ref =
// variable) and reflected by type() via nodetype_T0oT1.
template <typename T, typename T0, typename T1>
class T0oT1 : public expression_node<T>
{
public:

   typedef typename details::functor_t<T> functor_t;
   typedef typename functor_t::bfunc_t    bfunc_t;
   typedef T value_type;
   typedef T0oT1<T,T0,T1> node_type;

   T0oT1(T0 p0, T1 p1, const bfunc_t p2)
   : t0_(p0)
   , t1_(p1)
   , f_ (p2)
   {}

   inline typename expression_node<T>::node_type type() const
   {
      static const typename expression_node<T>::node_type result = nodetype_T0oT1<T,T0,T1>::result;
      return result;
   }

   inline operator_type operation() const
   {
      return e_default;
   }

   inline T value() const
   {
      return f_(t0_,t1_);
   }

   inline T0
t0() const { return t0_; }

inline T1 t1() const { return t1_; }

inline bfunc_t f() const { return f_; }

template <typename Allocator>
static inline expression_node<T>* allocate(Allocator& allocator,
                                           T0 p0, T1 p1, bfunc_t p2)
{
   return allocator
            .template allocate_type<node_type,T0,T1,bfunc_t&>
               (p0, p1, p2);
}

private:

T0oT1(T0oT1<T,T0,T1>&) {}
T0oT1<T,T0,T1>& operator=(T0oT1<T,T0,T1>&) { return (*this); }

T0 t0_;
T1 t1_;
const bfunc_t f_;
};

// Three-operand composite node; ProcessMode (T0oT1oT2process::mode0/mode1)
// selects the grouping in which f0_/f1_ are applied.
template <typename T, typename T0, typename T1, typename T2, typename ProcessMode>
class T0oT1oT2 : public T0oT1oT2_base_node<T>
{
public:

   typedef typename details::functor_t<T> functor_t;
   typedef typename functor_t::bfunc_t    bfunc_t;
   typedef T value_type;
   typedef T0oT1oT2<T,T0,T1,T2,ProcessMode> node_type;
   typedef ProcessMode process_mode_t;

   T0oT1oT2(T0 p0, T1 p1, T2 p2, const bfunc_t p3, const bfunc_t p4)
   : t0_(p0)
   , t1_(p1)
   , t2_(p2)
   , f0_(p3)
   , f1_(p4)
   {}

   inline typename expression_node<T>::node_type type() const
   {
      static const typename expression_node<T>::node_type result = nodetype_T0oT1oT2<T,T0,T1,T2>::result;
      return result;
   }

   inline operator_type operation() const
   {
      return e_default;
   }

   inline T value() const
   {
      return ProcessMode::process(t0_,t1_,t2_,f0_,f1_);
   }

   inline T0 t0() const { return t0_; }
   inline T1 t1() const { return t1_; }
   inline T2 t2() const { return t2_; }

   bfunc_t f0() const { return f0_; }
   bfunc_t f1() const { return f1_; }

   std::string type_id() const { return id(); }

   static inline std::string id()
   {
      return process_mode_t::template id<T0,T1,T2>();
   }

   template <typename Allocator>
   static inline expression_node<T>* allocate(Allocator& allocator,
                                              T0 p0, T1 p1, T2 p2,
                                              bfunc_t p3, bfunc_t p4)
   {
      return allocator
               .template allocate_type<node_type,T0,T1,T2,bfunc_t,bfunc_t>
                  (p0, p1, p2, p3, p4);
   }

private:

   T0oT1oT2(node_type&) {}
   node_type& operator=(node_type&) { return (*this); }

   T0 t0_;
   T1 t1_;
   T2 t2_;
   const bfunc_t f0_;
   const bfunc_t f1_;
};

// Four-operand composite node; ProcessMode (T0oT1oT20T3process::mode0..4)
// selects the grouping in which f0_/f1_/f2_ are applied.
template <typename T, typename T0_, typename T1_, typename T2_, typename
T3_, typename ProcessMode>
class T0oT1oT2oT3 : public T0oT1oT2oT3_base_node<T>
{
public:

   typedef typename details::functor_t<T> functor_t;
   typedef typename functor_t::bfunc_t    bfunc_t;
   typedef T value_type;
   typedef T0_ T0;
   typedef T1_ T1;
   typedef T2_ T2;
   typedef T3_ T3;
   typedef T0oT1oT2oT3<T,T0,T1,T2,T3,ProcessMode> node_type;
   typedef ProcessMode process_mode_t;

   T0oT1oT2oT3(T0 p0, T1 p1, T2 p2, T3 p3,
               bfunc_t p4, bfunc_t p5, bfunc_t p6)
   : t0_(p0)
   , t1_(p1)
   , t2_(p2)
   , t3_(p3)
   , f0_(p4)
   , f1_(p5)
   , f2_(p6)
   {}

   inline T value() const
   {
      return ProcessMode::process(t0_, t1_, t2_, t3_, f0_, f1_, f2_);
   }

   inline T0 t0() const { return t0_; }
   inline T1 t1() const { return t1_; }
   inline T2 t2() const { return t2_; }
   inline T3 t3() const { return t3_; }

   inline bfunc_t f0() const { return f0_; }
   inline bfunc_t f1() const { return f1_; }
   inline bfunc_t f2() const { return f2_; }

   inline std::string type_id() const { return id(); }

   static inline std::string id()
   {
      return process_mode_t::template id<T0,T1,T2,T3>();
   }

   template <typename Allocator>
   static inline expression_node<T>* allocate(Allocator& allocator,
                                              T0 p0, T1 p1, T2 p2, T3 p3,
                                              bfunc_t p4, bfunc_t p5, bfunc_t p6)
   {
      // Note: the final functor type is deduced from p6.
      return allocator
               .template allocate_type<node_type,T0,T1,T2,T3,bfunc_t,bfunc_t>
                  (p0, p1, p2, p3, p4, p5, p6);
   }

private:

   T0oT1oT2oT3(node_type&) {}
   node_type& operator=(node_type&) { return (*this); }

   T0 t0_;
   T1 t1_;
   T2 t2_;
   T3 t3_;
   const bfunc_t f0_;
   const bfunc_t f1_;
   const bfunc_t f2_;
};

// Three-operand special-function node: applies a ternary functor directly.
template <typename T, typename T0, typename T1, typename T2>
class T0oT1oT2_sf3 : public T0oT1oT2_base_node<T>
{
public:

   typedef typename details::functor_t<T> functor_t;
   typedef typename functor_t::tfunc_t    tfunc_t;
   typedef T value_type;
   typedef T0oT1oT2_sf3<T,T0,T1,T2> node_type;

   T0oT1oT2_sf3(T0 p0, T1 p1, T2 p2, const tfunc_t p3)
   : t0_(p0)
   , t1_(p1)
   , t2_(p2)
   , f_ (p3)
   {}

   inline typename expression_node<T>::node_type type() const
   {
      static const typename expression_node<T>::node_type result = nodetype_T0oT1oT2<T,T0,T1,T2>::result;
      return result;
   }

   inline operator_type operation() const
   {
      return e_default;
   }

   inline T value() const
   {
      return f_(t0_, t1_, t2_);
   }

   inline T0 t0() const { return t0_; }
   inline T1 t1() const { return t1_; }
   inline T2 t2() const { return t2_; }

   tfunc_t f() const { return f_; }

   std::string type_id() const { return id(); }

   static inline std::string id() { return "sf3"; }

   template <typename Allocator>
   static inline expression_node<T>* allocate(Allocator& allocator,
                                              T0 p0, T1 p1, T2 p2, tfunc_t p3)
   {
      return allocator
               .template allocate_type<node_type,T0,T1,T2,tfunc_t>
                  (p0, p1, p2, p3);
   }

private:

   T0oT1oT2_sf3(node_type&) {}
   node_type& operator=(node_type&) { return (*this); }

   T0 t0_;
   T1 t1_;
   T2 t2_;
   const tfunc_t f_;
};

// Interface for sf3 "ext" nodes exposing their three operands.
template <typename T, typename T0, typename T1, typename T2>
class sf3ext_type_node : public T0oT1oT2_base_node<T>
{
public:

   virtual ~sf3ext_type_node() {}

   virtual T0 t0() const = 0;

   virtual T1 t1() const = 0;

   virtual T2 t2() const = 0;
};

// Three-operand special-function node with the operation baked in as a
// compile-time policy (SF3Operation) rather than a stored functor.
template <typename T, typename T0, typename T1, typename T2, typename SF3Operation>
class T0oT1oT2_sf3ext : public sf3ext_type_node<T,T0,T1,T2>
{
public:

   typedef typename details::functor_t<T> functor_t;
   typedef typename functor_t::tfunc_t    tfunc_t;
   typedef T value_type;
   typedef T0oT1oT2_sf3ext<T,T0,T1,T2,SF3Operation> node_type;

   T0oT1oT2_sf3ext(T0 p0, T1 p1, T2 p2)
   : t0_(p0)
   , t1_(p1)
   , t2_(p2)
   {}

   inline typename expression_node<T>::node_type type() const
   {
      static const typename expression_node<T>::node_type result = nodetype_T0oT1oT2<T,T0,T1,T2>::result;
      return result;
   }

   inline operator_type operation() const
   {
      return e_default;
   }

   inline T value() const
   {
      return SF3Operation::process(t0_, t1_, t2_);
   }

   T0 t0() const { return t0_; }
   T1 t1() const { return t1_; }
   T2 t2() const { return t2_; }

   std::string type_id() const { return id(); }

   static inline std::string id() { return SF3Operation::id(); }

   template <typename Allocator>
   static inline expression_node<T>* allocate(Allocator& allocator,
                                              T0 p0, T1 p1, T2 p2)
   {
      return allocator
               .template allocate_type<node_type,T0,T1,T2>
                  (p0, p1, p2);
   }

private:

   T0oT1oT2_sf3ext(node_type&) {}
   node_type& operator=(node_type&) { return (*this); }

   T0 t0_;
   T1 t1_;
   T2 t2_;
};

// True when the node is one of the synthesised three-operand types.
template <typename T>
inline bool is_sf3ext_node(const expression_node<T>* n)
{
   switch (n->type())
   {
      case expression_node<T>::e_vovov : return true;
      case expression_node<T>::e_vovoc : return true;
      case expression_node<T>::e_vocov : return true;
      case expression_node<T>::e_covov : return true;
      case expression_node<T>::e_covoc : return true;
      default                          : return false;
   }
}

// Four-operand special-function node: applies a quaternary functor directly.
template <typename T, typename T0, typename T1, typename T2, typename T3>
class T0oT1oT2oT3_sf4 : public T0oT1oT2_base_node<T>
{
public:

   typedef typename details::functor_t<T> functor_t;
   typedef typename functor_t::qfunc_t    qfunc_t;
   typedef T value_type;
   typedef T0oT1oT2oT3_sf4<T,T0,T1,T2,T3> node_type;

   T0oT1oT2oT3_sf4(T0 p0, T1 p1, T2 p2, T3 p3, const qfunc_t p4)
   : t0_(p0)
   , t1_(p1)
   , t2_(p2)
   , t3_(p3)
   , f_ (p4)
   {}

   inline typename expression_node<T>::node_type type() const
   {
      static const typename expression_node<T>::node_type result = nodetype_T0oT1oT2oT3<T,T0,T1,T2,T3>::result;
      return result;
   }

   inline operator_type operation() const
   {
      return e_default;
   }

   inline T value() const
   {
      return f_(t0_, t1_, t2_, t3_);
   }

   inline T0 t0() const { return t0_; }
   inline T1 t1() const { return t1_; }
   inline T2 t2() const { return t2_; }
   inline T3 t3() const { return t3_; }

   qfunc_t f() const { return f_; }

   std::string type_id() const { return id(); }

   static inline std::string id() { return "sf4"; }

   template <typename Allocator>
   static inline expression_node<T>* allocate(Allocator& allocator,
                                              T0 p0, T1 p1, T2 p2, T3 p3, qfunc_t p4)
   {
      return allocator
               .template allocate_type<node_type,T0,T1,T2,T3,qfunc_t>
                  (p0, p1, p2, p3, p4);
   }

private:

   T0oT1oT2oT3_sf4(node_type&) {}
   node_type& operator=(node_type&) { return (*this); }

   T0 t0_;
   T1 t1_;
   T2 t2_;
   T3 t3_;
   const qfunc_t f_;
};

template <typename T, typename T0, typename T1, typename T2,
typename T3, typename SF4Operation> class T0oT1oT2oT3_sf4ext : public T0oT1oT2oT3_base_node<T> { public: typedef typename details::functor_t<T> functor_t; typedef typename functor_t::tfunc_t tfunc_t; typedef T value_type; typedef T0oT1oT2oT3_sf4ext<T,T0,T1,T2,T3,SF4Operation> node_type; T0oT1oT2oT3_sf4ext(T0 p0, T1 p1, T2 p2, T3 p3) : t0_(p0), t1_(p1), t2_(p2), t3_(p3) {} inline typename expression_node<T>::node_type type() const { static const typename expression_node<T>::node_type result = nodetype_T0oT1oT2oT3<T,T0,T1,T2,T3>::result; return result; } inline operator_type operation() const { return e_default; } inline T value() const { return SF4Operation::process(t0_, t1_, t2_, t3_); } inline T0 t0() const { return t0_; } inline T1 t1() const { return t1_; } inline T2 t2() const { return t2_; } inline T3 t3() const { return t2_; } std::string type_id() const { return id(); } static inline std::string id() { return SF4Operation::id(); } template <typename Allocator> static inline expression_node<T>* allocate(Allocator& allocator, T0 p0, T1 p1, T2 p2, T3 p3) { return allocator .template allocate_type<node_type,T0,T1,T2,T3> (p0, p1, p2, p3); } private: T0oT1oT2oT3_sf4ext(node_type&) {} node_type& operator=(node_type&) { return (*this); } T0 t0_; T1 t1_; T2 t2_; T3 t3_; }; template <typename T> inline bool is_sf4ext_node(const expression_node<T>* n) { switch (n->type()) { case expression_node<T>::e_vovovov : return true; case expression_node<T>::e_vovovoc : return true; case expression_node<T>::e_vovocov : return true; case expression_node<T>::e_vocovov : return true; case expression_node<T>::e_covovov : return true; case expression_node<T>::e_covocov : return true; case expression_node<T>::e_vocovoc : return true; case expression_node<T>::e_covovoc : return true; case expression_node<T>::e_vococov : return true; default : return false; } } template <typename T, typename T0, typename T1> struct T0oT1_define { typedef details::T0oT1<T,T0,T1> type0; }; template 
<typename T, typename T0, typename T1, typename T2> struct T0oT1oT2_define { typedef details::T0oT1oT2<T,T0,T1,T2,typename T0oT1oT2process<T>::mode0> type0; typedef details::T0oT1oT2<T,T0,T1,T2,typename T0oT1oT2process<T>::mode1> type1; typedef details::T0oT1oT2_sf3<T,T0,T1,T2> sf3_type; typedef details::sf3ext_type_node<T,T0,T1,T2> sf3_type_node; }; template <typename T, typename T0, typename T1, typename T2, typename T3> struct T0oT1oT2oT3_define { typedef details::T0oT1oT2oT3<T,T0,T1,T2,T3,typename T0oT1oT20T3process<T>::mode0> type0; typedef details::T0oT1oT2oT3<T,T0,T1,T2,T3,typename T0oT1oT20T3process<T>::mode1> type1; typedef details::T0oT1oT2oT3<T,T0,T1,T2,T3,typename T0oT1oT20T3process<T>::mode2> type2; typedef details::T0oT1oT2oT3<T,T0,T1,T2,T3,typename T0oT1oT20T3process<T>::mode3> type3; typedef details::T0oT1oT2oT3<T,T0,T1,T2,T3,typename T0oT1oT20T3process<T>::mode4> type4; typedef details::T0oT1oT2oT3_sf4<T,T0,T1,T2,T3> sf4_type; }; template <typename T, typename Operation> class vov_node : public vov_base_node<T> { public: typedef expression_node<T>* expression_ptr; typedef Operation operation_t; // variable op variable node explicit vov_node(const T& var0, const T& var1) : v0_(var0), v1_(var1) {} inline T value() const { return Operation::process(v0_,v1_); } inline typename expression_node<T>::node_type type() const { return Operation::type(); } inline operator_type operation() const { return Operation::operation(); } inline const T& v0() const { return v0_; } inline const T& v1() const { return v1_; } protected: const T& v0_; const T& v1_; private: vov_node(vov_node<T,Operation>&); vov_node<T,Operation>& operator=(vov_node<T,Operation>&); }; template <typename T, typename Operation> class cov_node : public cov_base_node<T> { public: typedef expression_node<T>* expression_ptr; typedef Operation operation_t; // constant op variable node explicit cov_node(const T& const_var, const T& var) : c_(const_var), v_(var) {} inline T value() const { return 
Operation::process(c_,v_); }

   inline typename expression_node<T>::node_type type() const { return Operation::type(); }

   inline operator_type operation() const { return Operation::operation(); }

   inline const T c() const { return c_; }

   inline const T& v() const { return v_; }

protected:

   const T c_;
   const T& v_;

private:

   // Copying is disabled: declarations are private.
   cov_node(const cov_node<T,Operation>&);
   cov_node<T,Operation>& operator=(const cov_node<T,Operation>&);
};

template <typename T, typename Operation>
class voc_node : public voc_base_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef Operation operation_t;

   // variable op constant node
   // NOTE(review): unlike vov_node/cov_node, no type() override appears
   // here; presumably the base class provides it - confirm upstream.
   explicit voc_node(const T& var, const T& const_var)
   : v_(var), c_(const_var)
   {}

   inline T value() const { return Operation::process(v_,c_); }

   inline operator_type operation() const { return Operation::operation(); }

   inline const T c() const { return c_; }

   inline const T& v() const { return v_; }

protected:

   const T& v_;
   const T c_;

private:

   voc_node(const voc_node<T,Operation>&);
   voc_node<T,Operation>& operator=(const voc_node<T,Operation>&);
};

template <typename T, typename Operation>
class vob_node : public vob_base_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef std::pair<expression_ptr,bool> branch_t;
   typedef Operation operation_t;

   // variable op branch node: the right operand is a sub-expression
   // owned by this node (the bool in branch_t is the ownership flag).
   explicit vob_node(const T& var, const expression_ptr brnch)
   : v_(var)
   {
      init_branches<1>(branch_,brnch);
   }

   ~vob_node()
   {
      cleanup_branches::execute<T,1>(branch_);
   }

   // Evaluates the branch, then applies Operation to (variable, branch).
   inline T value() const { return Operation::process(v_,branch_[0].first->value()); }

   inline operator_type operation() const { return Operation::operation(); }

   inline const T& v() const { return v_; }

   inline expression_node<T>* branch(const std::size_t&) const { return branch_[0].first; }

private:

   vob_node(const vob_node<T,Operation>&);
   vob_node<T,Operation>& operator=(const vob_node<T,Operation>&);

   const T& v_;
   branch_t branch_[1];
};

template <typename T, typename Operation>
class bov_node : public bov_base_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef std::pair<expression_ptr,bool> branch_t;
   typedef Operation operation_t;

   // branch op variable node: mirror of vob_node with operands swapped.
   explicit bov_node(const expression_ptr brnch, const T& var)
   : v_(var)
   {
      init_branches<1>(branch_,brnch);
   }

   ~bov_node()
   {
      cleanup_branches::execute<T,1>(branch_);
   }

   inline T value() const { return Operation::process(branch_[0].first->value(),v_); }

   inline operator_type operation() const { return Operation::operation(); }

   inline const T& v() const { return v_; }

   inline expression_node<T>* branch(const std::size_t&) const { return branch_[0].first; }

private:

   bov_node(const bov_node<T,Operation>&);
   bov_node<T,Operation>& operator=(const bov_node<T,Operation>&);

   const T& v_;
   branch_t branch_[1];
};

template <typename T, typename Operation>
class cob_node : public cob_base_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef std::pair<expression_ptr,bool> branch_t;
   typedef Operation operation_t;

   // constant op branch node: constant left operand, owned sub-expression
   // right operand.
   explicit cob_node(const T const_var, const expression_ptr brnch)
   : c_(const_var)
   {
      init_branches<1>(branch_,brnch);
   }

   ~cob_node()
   {
      cleanup_branches::execute<T,1>(branch_);
   }

   inline T value() const { return Operation::process(c_,branch_[0].first->value()); }

   inline operator_type operation() const { return Operation::operation(); }

   inline const T c() const { return c_; }

   // Overwrites the stored constant by const_cast-ing away the constness
   // of c_. NOTE(review): writing a const member through const_cast is
   // only defined if the node object itself was not created const.
   inline void set_c(const T new_c)
   {
      (*const_cast<T*>(&c_)) = new_c;
   }

   inline expression_node<T>* branch(const std::size_t&) const { return branch_[0].first; }

   // Transfers branch ownership to the caller by clearing the ownership
   // flag, so the destructor will not delete the returned pointer.
   inline expression_node<T>* move_branch(const std::size_t&)
   {
      branch_[0].second = false;
      return branch_[0].first;
   }

private:

   cob_node(const cob_node<T,Operation>&);
   cob_node<T,Operation>& operator=(const cob_node<T,Operation>&);

   const T c_;
   branch_t branch_[1];
};

template <typename T, typename Operation>
class boc_node : public boc_base_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef std::pair<expression_ptr,bool> branch_t;
   typedef Operation operation_t;

   // branch op constant node: mirror of cob_node with operands swapped.
   explicit boc_node(const expression_ptr brnch, const T const_var)
   : c_(const_var)
   {
      init_branches<1>(branch_,brnch);
   }

   ~boc_node()
   {
      cleanup_branches::execute<T,1>(branch_);
   }

   inline T value() const { return Operation::process(branch_[0].first->value(),c_); }

   inline operator_type operation() const { return Operation::operation(); }

   inline const T c() const { return c_; }

   // See cob_node::set_c - same const_cast caveat applies.
   inline void set_c(const T new_c)
   {
      (*const_cast<T*>(&c_)) = new_c;
   }

   inline expression_node<T>* branch(const std::size_t&) const { return branch_[0].first; }

   // Transfers branch ownership to the caller by clearing the ownership flag.
   inline expression_node<T>* move_branch(const std::size_t&)
   {
      branch_[0].second = false;
      return branch_[0].first;
   }

private:

   boc_node(const boc_node<T,Operation>&);
   boc_node<T,Operation>& operator=(const boc_node<T,Operation>&);

   const T c_;
   branch_t branch_[1];
};

#ifndef exprtk_disable_string_capabilities
template <typename T, typename SType0, typename SType1, typename Operation>
class sos_node : public sos_base_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef Operation operation_t;

   // string op string node
   explicit sos_node(SType0 p0, SType1 p1)
   : s0_(p0), s1_(p1)
   {}

   inline T value() const { return Operation::process(s0_,s1_); }

   inline typename expression_node<T>::node_type type() const { return Operation::type(); }

   inline operator_type operation() const { return Operation::operation(); }

   inline std::string& s0() { return s0_; }

   inline std::string& s1() { return s1_; }

protected:

   SType0 s0_;
   SType1 s1_;

private:

   sos_node(sos_node<T,SType0,SType1,Operation>&);
   sos_node<T,SType0,SType1,Operation>& operator=(sos_node<T,SType0,SType1,Operation>&);
};

template <typename T, typename SType0, typename SType1, typename RangePack, typename Operation>
class str_xrox_node : public sos_base_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef Operation operation_t;

   // string-range op string node
   explicit str_xrox_node(SType0 p0, SType1 p1, RangePack
rp0) : s0_ (p0 ), s1_ (p1 ), rp0_(rp0) {} ~str_xrox_node() { rp0_.free(); } inline T value() const { std::size_t r0 = 0; std::size_t r1 = 0; if (rp0_(r0, r1, s0_.size())) return Operation::process(s0_.substr(r0, (r1 - r0) + 1), s1_); else return T(0); } inline typename expression_node<T>::node_type type() const { return Operation::type(); } inline operator_type operation() const { return Operation::operation(); } inline std::string& s0() { return s0_; } inline std::string& s1() { return s1_; } protected: SType0 s0_; SType1 s1_; RangePack rp0_; private: str_xrox_node(str_xrox_node<T,SType0,SType1,RangePack,Operation>&); str_xrox_node<T,SType0,SType1,RangePack,Operation>& operator=(str_xrox_node<T,SType0,SType1,RangePack,Operation>&); }; template <typename T, typename SType0, typename SType1, typename RangePack, typename Operation> class str_xoxr_node : public sos_base_node<T> { public: typedef expression_node<T>* expression_ptr; typedef Operation operation_t; // string op string range node explicit str_xoxr_node(SType0 p0, SType1 p1, RangePack rp1) : s0_ (p0 ), s1_ (p1 ), rp1_(rp1) {} ~str_xoxr_node() { rp1_.free(); } inline T value() const { std::size_t r0 = 0; std::size_t r1 = 0; if (rp1_(r0, r1, s1_.size())) return Operation::process(s0_, s1_.substr(r0, (r1 - r0) + 1)); else return T(0); } inline typename expression_node<T>::node_type type() const { return Operation::type(); } inline operator_type operation() const { return Operation::operation(); } inline std::string& s0() { return s0_; } inline std::string& s1() { return s1_; } protected: SType0 s0_; SType1 s1_; RangePack rp1_; private: str_xoxr_node(str_xoxr_node<T,SType0,SType1,RangePack,Operation>&); str_xoxr_node<T,SType0,SType1,RangePack,Operation>& operator=(str_xoxr_node<T,SType0,SType1,RangePack,Operation>&); }; template <typename T, typename SType0, typename SType1, typename RangePack, typename Operation> class str_xroxr_node : public sos_base_node<T> { public: typedef expression_node<T>* 
expression_ptr; typedef Operation operation_t; // string-range op string-range node explicit str_xroxr_node(SType0 p0, SType1 p1, RangePack rp0, RangePack rp1) : s0_ (p0 ), s1_ (p1 ), rp0_(rp0), rp1_(rp1) {} ~str_xroxr_node() { rp0_.free(); rp1_.free(); } inline T value() const { std::size_t r0_0 = 0; std::size_t r0_1 = 0; std::size_t r1_0 = 0; std::size_t r1_1 = 0; if ( rp0_(r0_0, r1_0, s0_.size()) && rp1_(r0_1, r1_1, s1_.size()) ) { return Operation::process( s0_.substr(r0_0, (r1_0 - r0_0) + 1), s1_.substr(r0_1, (r1_1 - r0_1) + 1) ); } else return T(0); } inline typename expression_node<T>::node_type type() const { return Operation::type(); } inline operator_type operation() const { return Operation::operation(); } inline std::string& s0() { return s0_; } inline std::string& s1() { return s1_; } protected: SType0 s0_; SType1 s1_; RangePack rp0_; RangePack rp1_; private: str_xroxr_node(str_xroxr_node<T,SType0,SType1,RangePack,Operation>&); str_xroxr_node<T,SType0,SType1,RangePack,Operation>& operator=(str_xroxr_node<T,SType0,SType1,RangePack,Operation>&); }; template <typename T, typename Operation> class str_sogens_node : public binary_node<T> { public: typedef expression_node <T>* expression_ptr; typedef string_base_node<T>* str_base_ptr; typedef range_pack <T> range_t; typedef range_t* range_ptr; typedef range_interface<T> irange_t; typedef irange_t* irange_ptr; str_sogens_node(const operator_type& opr, expression_ptr branch0, expression_ptr branch1) : binary_node<T>(opr, branch0, branch1), str0_base_ptr_ (0), str1_base_ptr_ (0), str0_range_ptr_(0), str1_range_ptr_(0) { if (is_generally_string_node(binary_node<T>::branch_[0].first)) { str0_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[0].first); if (0 == str0_base_ptr_) return; irange_ptr range = dynamic_cast<irange_ptr>(binary_node<T>::branch_[0].first); if (0 == range) return; str0_range_ptr_ = &(range->range_ref()); } if (is_generally_string_node(binary_node<T>::branch_[1].first)) { 
str1_base_ptr_ = dynamic_cast<str_base_ptr>(binary_node<T>::branch_[1].first); if (0 == str1_base_ptr_) return; irange_ptr range = dynamic_cast<irange_ptr>(binary_node<T>::branch_[1].first); if (0 == range) return; str1_range_ptr_ = &(range->range_ref()); } } inline T value() const { if ( str0_base_ptr_ && str1_base_ptr_ && str0_range_ptr_ && str1_range_ptr_ ) { binary_node<T>::branch_[0].first->value(); binary_node<T>::branch_[1].first->value(); std::size_t str0_r0 = 0; std::size_t str0_r1 = 0; std::size_t str1_r0 = 0; std::size_t str1_r1 = 0; range_t& range0 = (*str0_range_ptr_); range_t& range1 = (*str1_range_ptr_); if ( range0(str0_r0, str0_r1, str0_base_ptr_->size()) && range1(str1_r0, str1_r1, str1_base_ptr_->size()) ) { return Operation::process( str0_base_ptr_->str().substr(str0_r0,(str0_r1 - str0_r0) + 1), str1_base_ptr_->str().substr(str1_r0,(str1_r1 - str1_r0) + 1) ); } } return std::numeric_limits<T>::quiet_NaN(); } inline typename expression_node<T>::node_type type() const { return Operation::type(); } inline operator_type operation() const { return Operation::operation(); } private: str_sogens_node(str_sogens_node<T,Operation>&); str_sogens_node<T,Operation>& operator=(str_sogens_node<T,Operation>&); str_base_ptr str0_base_ptr_; str_base_ptr str1_base_ptr_; range_ptr str0_range_ptr_; range_ptr str1_range_ptr_; }; template <typename T, typename SType0, typename SType1, typename SType2, typename Operation> class sosos_node : public sosos_base_node<T> { public: typedef expression_node<T>* expression_ptr; typedef Operation operation_t; // variable op variable node explicit sosos_node(SType0 p0, SType1 p1, SType2 p2) : s0_(p0), s1_(p1), s2_(p2) {} inline T value() const { return Operation::process(s0_,s1_,s2_); } inline typename expression_node<T>::node_type type() const { return Operation::type(); } inline operator_type operation() const { return Operation::operation(); } inline std::string& s0() { return s0_; } inline std::string& s1() { return s1_; } 
inline std::string& s2() { return s2_; } protected: SType0 s0_; SType1 s1_; SType2 s2_; private: sosos_node(sosos_node<T,SType0,SType1,SType2,Operation>&); sosos_node<T,SType0,SType1,SType2,Operation>& operator=(sosos_node<T,SType0,SType1,SType2,Operation>&); }; #endif template <typename T, typename PowOp> class ipow_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; typedef PowOp operation_t; explicit ipow_node(const T& v) : v_(v) {} inline T value() const { return PowOp::result(v_); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_ipow; } private: ipow_node(const ipow_node<T,PowOp>&); ipow_node<T,PowOp>& operator=(const ipow_node<T,PowOp>&); const T& v_; }; template <typename T, typename PowOp> class bipow_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; typedef std::pair<expression_ptr, bool> branch_t; typedef PowOp operation_t; explicit bipow_node(expression_ptr brnch) { init_branches<1>(branch_, brnch); } ~bipow_node() { cleanup_branches::execute<T,1>(branch_); } inline T value() const { return PowOp::result(branch_[0].first->value()); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_ipow; } private: bipow_node(const bipow_node<T,PowOp>&); bipow_node<T,PowOp>& operator=(const bipow_node<T,PowOp>&); branch_t branch_[1]; }; template <typename T, typename PowOp> class ipowinv_node : public expression_node<T> { public: typedef expression_node<T>* expression_ptr; typedef PowOp operation_t; explicit ipowinv_node(const T& v) : v_(v) {} inline T value() const { return (T(1) / PowOp::result(v_)); } inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_ipowinv; } private: ipowinv_node(const ipowinv_node<T,PowOp>&); ipowinv_node<T,PowOp>& operator=(const ipowinv_node<T,PowOp>&); const T& v_; }; template <typename T, typename PowOp> class bipowninv_node : public 
expression_node<T>
{
public:

   typedef expression_node<T>* expression_ptr;
   typedef std::pair<expression_ptr, bool> branch_t;
   typedef PowOp operation_t;

   // Takes ownership of the branch sub-expression.
   explicit bipowninv_node(expression_ptr brnch)
   {
      init_branches<1>(branch_, brnch);
   }

   ~bipowninv_node()
   {
      cleanup_branches::execute<T,1>(branch_);
   }

   // Reciprocal of PowOp applied to the evaluated branch value.
   inline T value() const { return (T(1) / PowOp::result(branch_[0].first->value())); }

   inline typename expression_node<T>::node_type type() const { return expression_node<T>::e_ipowinv; }

private:

   bipowninv_node(const bipowninv_node<T,PowOp>&);
   bipowninv_node<T,PowOp>& operator=(const bipowninv_node<T,PowOp>&);

   branch_t branch_[1];
};

// Node-classification helpers. The first group tests the node's dynamic
// type against a *_base_node interface via dynamic_cast (a null input
// yields false since dynamic_cast of null is null); the second group
// compares the node's type() tag after an explicit null check.
template <typename T>
inline bool is_vov_node(const expression_node<T>* node)
{
   return (0 != dynamic_cast<const vov_base_node<T>*>(node));
}

template <typename T>
inline bool is_cov_node(const expression_node<T>* node)
{
   return (0 != dynamic_cast<const cov_base_node<T>*>(node));
}

template <typename T>
inline bool is_voc_node(const expression_node<T>* node)
{
   return (0 != dynamic_cast<const voc_base_node<T>*>(node));
}

template <typename T>
inline bool is_cob_node(const expression_node<T>* node)
{
   return (0 != dynamic_cast<const cob_base_node<T>*>(node));
}

template <typename T>
inline bool is_boc_node(const expression_node<T>* node)
{
   return (0 != dynamic_cast<const boc_base_node<T>*>(node));
}

template <typename T>
inline bool is_t0ot1ot2_node(const expression_node<T>* node)
{
   return (0 != dynamic_cast<const T0oT1oT2_base_node<T>*>(node));
}

template <typename T>
inline bool is_t0ot1ot2ot3_node(const expression_node<T>* node)
{
   return (0 != dynamic_cast<const T0oT1oT2oT3_base_node<T>*>(node));
}

template <typename T>
inline bool is_uv_node(const expression_node<T>* node)
{
   return (0 != dynamic_cast<const uv_base_node<T>*>(node));
}

template <typename T>
inline bool is_string_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_stringvar == node->type());
}

template <typename T>
inline bool is_string_range_node(const
expression_node<T>* node)
{
   return node && (expression_node<T>::e_stringvarrng == node->type());
}

template <typename T>
inline bool is_const_string_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_stringconst == node->type());
}

template <typename T>
inline bool is_const_string_range_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_cstringvarrng == node->type());
}

template <typename T>
inline bool is_string_assignment_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_strass == node->type());
}

template <typename T>
inline bool is_string_concat_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_strconcat == node->type());
}

template <typename T>
inline bool is_string_function_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_strfunction == node->type());
}

template <typename T>
inline bool is_string_condition_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_strcondition == node->type());
}

template <typename T>
inline bool is_string_ccondition_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_strccondition == node->type());
}

template <typename T>
inline bool is_string_vararg_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_stringvararg == node->type());
}

// NOTE(review): "genricstring" spelling (missing 'e') is upstream ExprTk's
// own; callers elsewhere use this exact name, so it must not be renamed.
template <typename T>
inline bool is_genricstring_range_node(const expression_node<T>* node)
{
   return node && (expression_node<T>::e_strgenrange == node->type());
}

// True for every node kind that produces or represents a string value.
template <typename T>
inline bool is_generally_string_node(const expression_node<T>* node)
{
   if (node)
   {
      switch (node->type())
      {
         case expression_node<T>::e_stringvar     :
         case expression_node<T>::e_stringconst   :
         case expression_node<T>::e_stringvarrng  :
         case expression_node<T>::e_cstringvarrng :
         case expression_node<T>::e_strgenrange   :
         case expression_node<T>::e_strass        :
         case expression_node<T>::e_strconcat     :
         case
expression_node<T>::e_strfunction : case expression_node<T>::e_strcondition : case expression_node<T>::e_strccondition : case expression_node<T>::e_stringvararg : return true; default : return false; } } return false; } class node_allocator { public: template <typename ResultNode, typename OpType, typename ExprNode> inline expression_node<typename ResultNode::value_type>* allocate(OpType& operation, ExprNode (&branch)[1]) { return allocate<ResultNode>(operation,branch[0]); } template <typename ResultNode, typename OpType, typename ExprNode> inline expression_node<typename ResultNode::value_type>* allocate(OpType& operation, ExprNode (&branch)[2]) { return allocate<ResultNode>(operation,branch[0],branch[1]); } template <typename ResultNode, typename OpType, typename ExprNode> inline expression_node<typename ResultNode::value_type>* allocate(OpType& operation, ExprNode (&branch)[3]) { return allocate<ResultNode>(operation,branch[0],branch[1],branch[2]); } template <typename ResultNode, typename OpType, typename ExprNode> inline expression_node<typename ResultNode::value_type>* allocate(OpType& operation, ExprNode (&branch)[4]) { return allocate<ResultNode>(operation,branch[0],branch[1],branch[2],branch[3]); } template <typename ResultNode, typename OpType, typename ExprNode> inline expression_node<typename ResultNode::value_type>* allocate(OpType& operation, ExprNode (&branch)[5]) { return allocate<ResultNode>(operation,branch[0],branch[1],branch[2],branch[3],branch[4]); } template <typename ResultNode, typename OpType, typename ExprNode> inline expression_node<typename ResultNode::value_type>* allocate(OpType& operation, ExprNode (&branch)[6]) { return allocate<ResultNode>(operation,branch[0],branch[1],branch[2],branch[3],branch[4],branch[5]); } template <typename node_type> inline expression_node<typename node_type::value_type>* allocate() const { return (new node_type()); } template <typename node_type, typename Type, typename Allocator, template 
<typename,typename> class Sequence> inline expression_node<typename node_type::value_type>* allocate(const Sequence<Type,Allocator>& seq) const { return (new node_type(seq)); } template <typename node_type, typename T1> inline expression_node<typename node_type::value_type>* allocate(T1& t1) const { return (new node_type(t1)); } template <typename node_type, typename T1> inline expression_node<typename node_type::value_type>* allocate_c(const T1& t1) const { return (new node_type(t1)); } template <typename node_type, typename T1, typename T2> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2) const { return (new node_type(t1, t2)); } template <typename node_type, typename T1, typename T2> inline expression_node<typename node_type::value_type>* allocate_cr(const T1& t1, T2& t2) const { return (new node_type(t1, t2)); } template <typename node_type, typename T1, typename T2> inline expression_node<typename node_type::value_type>* allocate_rc(T1& t1, const T2& t2) const { return (new node_type(t1, t2)); } template <typename node_type, typename T1, typename T2> inline expression_node<typename node_type::value_type>* allocate_rr(T1& t1, T2& t2) const { return (new node_type(t1, t2)); } template <typename node_type, typename T1, typename T2> inline expression_node<typename node_type::value_type>* allocate_tt(T1 t1, T2 t2) const { return (new node_type(t1, t2)); } template <typename node_type, typename T1, typename T2, typename T3> inline expression_node<typename node_type::value_type>* allocate_ttt(T1 t1, T2 t2, T3 t3) const { return (new node_type(t1, t2, t3)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4> inline expression_node<typename node_type::value_type>* allocate_tttt(T1 t1, T2 t2, T3 t3, T4 t4) const { return (new node_type(t1, t2, t3, t4)); } template <typename node_type, typename T1, typename T2, typename T3> inline expression_node<typename node_type::value_type>* allocate_rrr(T1& 
t1, T2& t2, T3& t3) const { return (new node_type(t1, t2, t3)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4> inline expression_node<typename node_type::value_type>* allocate_rrrr(T1& t1, T2& t2, T3& t3, T4& t4) const { return (new node_type(t1, t2, t3, t4)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5> inline expression_node<typename node_type::value_type>* allocate_rrrrr(T1& t1, T2& t2, T3& t3, T4& t4, T5& t5) const { return (new node_type(t1, t2, t3, t4, t5)); } template <typename node_type, typename T1, typename T2, typename T3> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2, const T3& t3) const { return (new node_type(t1, t2, t3)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2, const T3& t3, const T4& t4) const { return (new node_type(t1, t2, t3, t4)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5) const { return (new node_type(t1, t2, t3, t4, t5)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6) const { return (new node_type(t1, t2, t3, t4, t5, t6)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7) const { return (new node_type(t1, t2, t3, t4, t5, t6, t7)); } template <typename 
node_type, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7, const T8& t8) const { return (new node_type(t1, t2, t3, t4, t5, t6, t7, t8)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7, const T8& t8, const T9& t9) const { return (new node_type(t1, t2, t3, t4, t5, t6, t7, t8, t9)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> inline expression_node<typename node_type::value_type>* allocate(const T1& t1, const T2& t2, const T3& t3, const T4& t4, const T5& t5, const T6& t6, const T7& t7, const T8& t8, const T9& t9, const T10& t10) const { return (new node_type(t1, t2, t3, t4, t5, t6, t7, t8, t9, t10)); } template <typename node_type, typename T1, typename T2, typename T3> inline expression_node<typename node_type::value_type>* allocate_type(T1 t1, T2 t2, T3 t3) const { return (new node_type(t1, t2, t3)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4> inline expression_node<typename node_type::value_type>* allocate_type(T1 t1, T2 t2, T3 t3, T4 t4) const { return (new node_type(t1, t2, t3, t4)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5> inline expression_node<typename node_type::value_type>* allocate_type(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5) const { return (new node_type(t1, t2, t3, t4, t5)); } template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5, 
typename T6>
   inline expression_node<typename node_type::value_type>* allocate_type(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6) const
   {
      return (new node_type(t1, t2, t3, t4, t5, t6));
   }

   template <typename node_type, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
   inline expression_node<typename node_type::value_type>* allocate_type(T1 t1, T2 t2, T3 t3, T4 t4, T5 t5, T6 t6, T7 t7) const
   {
      return (new node_type(t1, t2, t3, t4, t5, t6, t7));
   }

   // Deletes the node and nulls the caller's pointer.
   template <typename T>
   void inline free(expression_node<T>*& e) const
   {
      delete e;
      e = 0;
   }
};

// Populates the built-in-function table: maps each function symbol to its
// operation type tag and argument count (comparator: details::ilesscompare).
inline void load_operations_map(std::multimap<std::string,details::base_operation_t,details::ilesscompare>& m)
{
   #define register_op(Symbol,Type,Args) \
   m.insert(std::make_pair(std::string(Symbol),details::base_operation_t(Type,Args))); \

   // Unary functions
   register_op(      "abs", e_abs     , 1)
   register_op(     "acos", e_acos    , 1)
   register_op(    "acosh", e_acosh   , 1)
   register_op(     "asin", e_asin    , 1)
   register_op(    "asinh", e_asinh   , 1)
   register_op(     "atan", e_atan    , 1)
   register_op(    "atanh", e_atanh   , 1)
   register_op(     "ceil", e_ceil    , 1)
   register_op(      "cos", e_cos     , 1)
   register_op(     "cosh", e_cosh    , 1)
   register_op(      "exp", e_exp     , 1)
   register_op(    "expm1", e_expm1   , 1)
   register_op(    "floor", e_floor   , 1)
   register_op(      "log", e_log     , 1)
   register_op(    "log10", e_log10   , 1)
   register_op(     "log2", e_log2    , 1)
   register_op(    "log1p", e_log1p   , 1)
   register_op(    "round", e_round   , 1)
   register_op(      "sin", e_sin     , 1)
   register_op(     "sinc", e_sinc    , 1)
   register_op(     "sinh", e_sinh    , 1)
   register_op(      "sec", e_sec     , 1)
   register_op(      "csc", e_csc     , 1)
   register_op(     "sqrt", e_sqrt    , 1)
   register_op(      "tan", e_tan     , 1)
   register_op(     "tanh", e_tanh    , 1)
   register_op(      "cot", e_cot     , 1)
   register_op(  "rad2deg", e_r2d     , 1)
   register_op(  "deg2rad", e_d2r     , 1)
   register_op( "deg2grad", e_d2g     , 1)
   register_op( "grad2deg", e_g2d     , 1)
   register_op(      "sgn", e_sgn     , 1)
   register_op(      "not", e_notl    , 1)
   register_op(      "erf", e_erf     , 1)
   register_op(     "erfc", e_erfc    , 1)
   register_op(     "ncdf", e_ncdf    , 1)
   register_op(     "frac", e_frac    , 1)
   register_op(    "trunc", e_trunc   , 1)
   // Binary functions
   register_op(    "atan2", e_atan2   , 2)
   register_op(      "mod", e_mod     , 2)
   register_op(     "logn", e_logn    , 2)
   register_op(      "pow", e_pow     , 2)
   register_op(     "root", e_root    , 2)
   register_op(   "roundn", e_roundn  , 2)
   register_op(    "equal", e_equal   , 2)
   register_op("not_equal", e_nequal  , 2)
   register_op(    "hypot", e_hypot   , 2)
   register_op(      "shr", e_shr     , 2)
   register_op(      "shl", e_shl     , 2)
   // Ternary functions
   register_op(    "clamp", e_clamp   , 3)
   register_op(   "iclamp", e_iclamp  , 3)
   register_op(  "inrange", e_inrange , 3)
   #undef register_op
}

} // namespace details

// Mixin carrying callable-object metadata used by the parser: whether a
// zero-argument call is permitted, whether the function has side effects
// (defaults to true), and the accepted argument-count range
// [min_num_args_, max_num_args_] (defaults: 0 .. max std::size_t).
class function_traits
{
public:

   function_traits()
   : allow_zero_parameters_(false),
     has_side_effects_(true),
     min_num_args_(0),
     max_num_args_(std::numeric_limits<std::size_t>::max())
   {}

   inline bool& allow_zero_parameters() { return allow_zero_parameters_; }

   inline bool& has_side_effects() { return has_side_effects_; }

   std::size_t& min_num_args() { return min_num_args_; }

   std::size_t& max_num_args() { return max_num_args_; }

private:

   bool allow_zero_parameters_;
   bool has_side_effects_;
   std::size_t min_num_args_;
   std::size_t max_num_args_;
};

// Permit calling func with no arguments; also lowers min_num_args to 0
// so the two settings cannot contradict each other.
template <typename FunctionType>
void enable_zero_parameters(FunctionType& func)
{
   func.allow_zero_parameters() = true;

   if (0 != func.min_num_args())
   {
      func.min_num_args() = 0;
   }
}

// Forbid calling func with no arguments.
template <typename FunctionType>
void disable_zero_parameters(FunctionType& func)
{
   func.allow_zero_parameters() = false;
}

// Mark func as having side effects.
template <typename FunctionType>
void enable_has_side_effects(FunctionType& func)
{
   func.has_side_effects() = true;
}

// Mark func as free of side effects.
template <typename FunctionType>
void disable_has_side_effects(FunctionType& func)
{
   func.has_side_effects() = false;
}

// Set the minimum argument count; a non-zero minimum contradicts the
// zero-parameter flag, so that flag is cleared when both would hold.
template <typename FunctionType>
void set_min_num_args(FunctionType& func, const std::size_t& num_args)
{
   func.min_num_args() = num_args;

   if ((0 != func.min_num_args()) && func.allow_zero_parameters())
      func.allow_zero_parameters() = false;
}

template <typename FunctionType>
void
set_max_num_args(FunctionType& func, const std::size_t& num_args) { func.max_num_args() = num_args; } template <typename T> class ifunction : public function_traits { public: explicit ifunction(const std::size_t& pc) : param_count(pc) {} virtual ~ifunction() {} #define empty_method_body \ { \ return std::numeric_limits<T>::quiet_NaN(); \ } \ inline virtual T operator() () empty_method_body inline virtual T operator() (const T&) empty_method_body inline virtual T operator() (const T&,const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const 
T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body inline virtual T operator() (const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&, const T&) empty_method_body #undef empty_method_body std::size_t param_count; }; template <typename T> class ivararg_function : public function_traits { public: virtual ~ivararg_function() {} inline virtual T operator() (const std::vector<T>&) { exprtk_debug(("ivararg_function::operator() - Operator has not been overridden.\n")); return std::numeric_limits<T>::quiet_NaN(); } }; template <typename T> class igeneric_function : public function_traits { public: enum return_type { e_rtrn_scalar = 0, e_rtrn_string = 1 }; typedef T type; typedef type_store<T> generic_type; typedef typename 
generic_type::parameter_list parameter_list_t; igeneric_function(const std::string& param_seq = "", const return_type rtr_type = e_rtrn_scalar) : parameter_sequence(param_seq), rtrn_type(rtr_type) {} virtual ~igeneric_function() {} #define igeneric_function_empty_body(N) \ { \ exprtk_debug(("igeneric_function::operator() - Operator has not been overridden. ["#N"]\n")); \ return std::numeric_limits<T>::quiet_NaN(); \ } \ // f(i_0,i_1,....,i_N) --> Scalar inline virtual T operator() (parameter_list_t) igeneric_function_empty_body(1) // f(i_0,i_1,....,i_N) --> String inline virtual T operator() (std::string&, parameter_list_t) igeneric_function_empty_body(2) // f(psi,i_0,i_1,....,i_N) --> Scalar inline virtual T operator() (const std::size_t&, parameter_list_t) igeneric_function_empty_body(3) // f(psi,i_0,i_1,....,i_N) --> String inline virtual T operator() (const std::size_t&, std::string&, parameter_list_t) igeneric_function_empty_body(4) std::string parameter_sequence; return_type rtrn_type; }; template <typename T> class parser; template <typename T> class expression_helper; template <typename T> class symbol_table { public: typedef T (*ff00_functor)(); typedef T (*ff01_functor)(T); typedef T (*ff02_functor)(T,T); typedef T (*ff03_functor)(T,T,T); typedef T (*ff04_functor)(T,T,T,T); typedef T (*ff05_functor)(T,T,T,T,T); typedef T (*ff06_functor)(T,T,T,T,T,T); typedef T (*ff07_functor)(T,T,T,T,T,T,T); typedef T (*ff08_functor)(T,T,T,T,T,T,T,T); typedef T (*ff09_functor)(T,T,T,T,T,T,T,T,T); typedef T (*ff10_functor)(T,T,T,T,T,T,T,T,T,T); typedef T (*ff11_functor)(T,T,T,T,T,T,T,T,T,T,T); typedef T (*ff12_functor)(T,T,T,T,T,T,T,T,T,T,T,T); typedef T (*ff13_functor)(T,T,T,T,T,T,T,T,T,T,T,T,T); typedef T (*ff14_functor)(T,T,T,T,T,T,T,T,T,T,T,T,T,T); typedef T (*ff15_functor)(T,T,T,T,T,T,T,T,T,T,T,T,T,T,T); protected: struct freefunc00 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc00(ff00_functor ff) : exprtk::ifunction<T>(0), f(ff) {} 
inline T operator() () { return f(); } ff00_functor f; }; struct freefunc01 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc01(ff01_functor ff) : exprtk::ifunction<T>(1), f(ff) {} inline T operator() (const T& v0) { return f(v0); } ff01_functor f; }; struct freefunc02 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc02(ff02_functor ff) : exprtk::ifunction<T>(2), f(ff) {} inline T operator() (const T& v0, const T& v1) { return f(v0, v1); } ff02_functor f; }; struct freefunc03 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc03(ff03_functor ff) : exprtk::ifunction<T>(3), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2) { return f(v0, v1, v2); } ff03_functor f; }; struct freefunc04 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc04(ff04_functor ff) : exprtk::ifunction<T>(4), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2, const T& v3) { return f(v0, v1, v2, v3); } ff04_functor f; }; struct freefunc05 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc05(ff05_functor ff) : exprtk::ifunction<T>(5), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2, const T& v3, const T& v4) { return f(v0, v1, v2, v3, v4); } ff05_functor f; }; struct freefunc06 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc06(ff06_functor ff) : exprtk::ifunction<T>(6), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2, const T& v3, const T& v4, const T& v5) { return f(v0, v1, v2, v3, v4, v5); } ff06_functor f; }; struct freefunc07 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc07(ff07_functor ff) : exprtk::ifunction<T>(7), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2, const T& v3, const T& v4, const T& v5, const T& v6) { return f(v0, v1, v2, v3, v4, v5, v6); } ff07_functor f; }; struct 
freefunc08 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc08(ff08_functor ff) : exprtk::ifunction<T>(8), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2, const T& v3, const T& v4, const T& v5, const T& v6, const T& v7) { return f(v0, v1, v2, v3, v4, v5, v6, v7); } ff08_functor f; }; struct freefunc09 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc09(ff09_functor ff) : exprtk::ifunction<T>(9), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2, const T& v3, const T& v4, const T& v5, const T& v6, const T& v7, const T& v8) { return f(v0, v1, v2, v3, v4, v5, v6, v7, v8); } ff09_functor f; }; struct freefunc10 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc10(ff10_functor ff) : exprtk::ifunction<T>(10), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2, const T& v3, const T& v4, const T& v5, const T& v6, const T& v7, const T& v8, const T& v9) { return f(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9); } ff10_functor f; }; struct freefunc11 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc11(ff11_functor ff) : exprtk::ifunction<T>(11), f(ff) {} inline T operator() (const T& v0, const T& v1, const T& v2, const T& v3, const T& v4, const T& v5, const T& v6, const T& v7, const T& v8, const T& v9, const T& v10) { return f(v0, v1, v2, v3, v4, v5, v6, v7, v8, v9, v10); } ff11_functor f; }; struct freefunc12 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc12(ff12_functor ff) : exprtk::ifunction<T>(12), f(ff) {} inline T operator() (const T& v00, const T& v01, const T& v02, const T& v03, const T& v04, const T& v05, const T& v06, const T& v07, const T& v08, const T& v09, const T& v10, const T& v11) { return f(v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11); } ff12_functor f; }; struct freefunc13 : public exprtk::ifunction<T> { using 
exprtk::ifunction<T>::operator(); freefunc13(ff13_functor ff) : exprtk::ifunction<T>(13), f(ff) {} inline T operator() (const T& v00, const T& v01, const T& v02, const T& v03, const T& v04, const T& v05, const T& v06, const T& v07, const T& v08, const T& v09, const T& v10, const T& v11, const T& v12) { return f(v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11, v12); } ff13_functor f; }; struct freefunc14 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc14(ff14_functor ff) : exprtk::ifunction<T>(14), f(ff) {} inline T operator() (const T& v00, const T& v01, const T& v02, const T& v03, const T& v04, const T& v05, const T& v06, const T& v07, const T& v08, const T& v09, const T& v10, const T& v11, const T& v12, const T& v13) { return f(v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11, v12, v13); } ff14_functor f; }; struct freefunc15 : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); freefunc15(ff15_functor ff) : exprtk::ifunction<T>(15), f(ff) {} inline T operator() (const T& v00, const T& v01, const T& v02, const T& v03, const T& v04, const T& v05, const T& v06, const T& v07, const T& v08, const T& v09, const T& v10, const T& v11, const T& v12, const T& v13, const T& v14) { return f(v00, v01, v02, v03, v04, v05, v06, v07, v08, v09, v10, v11, v12, v13, v14); } ff15_functor f; }; template <typename Type, typename RawType> struct type_store { typedef details::expression_node<T>* expression_ptr; typedef typename details::variable_node<T> variable_node_t; typedef ifunction<T> ifunction_t; typedef ivararg_function<T> ivararg_function_t; typedef igeneric_function<T> igeneric_function_t; typedef details::vector_holder<T> vector_t; #ifndef exprtk_disable_string_capabilities typedef typename details::stringvar_node<T> stringvar_node_t; #endif typedef Type type_t; typedef type_t* type_ptr; typedef std::pair<bool,type_ptr> type_pair_t; typedef std::map<std::string,type_pair_t,details::ilesscompare> 
type_map_t; typedef typename type_map_t::iterator tm_itr_t; typedef typename type_map_t::const_iterator tm_const_itr_t; enum { lut_size = 256 }; type_map_t map; std::size_t size; type_store() : size(0) {} inline bool symbol_exists(const std::string& symbol_name) const { if (symbol_name.empty()) return false; else if (map.end() != map.find(symbol_name)) return true; else return false; } template <typename PtrType> inline std::string entity_name(const PtrType& ptr) const { if (map.empty()) return std::string(); tm_const_itr_t itr = map.begin(); while (map.end() != itr) { if (itr->second.second == ptr) { return itr->first; } else ++itr; } return std::string(); } inline bool is_constant(const std::string& symbol_name) const { if (symbol_name.empty()) return false; else { const tm_const_itr_t itr = map.find(symbol_name); if (map.end() == itr) return false; else return (*itr).second.first; } } template <typename Tie, typename RType> inline bool add_impl(const std::string& symbol_name, RType t, const bool is_const) { if (symbol_name.size() > 1) { for (std::size_t i = 0; i < details::reserved_symbols_size; ++i) { if (details::imatch(symbol_name, details::reserved_symbols[i])) { return false; } } } const tm_itr_t itr = map.find(symbol_name); if (map.end() == itr) { map[symbol_name] = Tie::make(t,is_const); ++size; } return true; } struct tie_array { static inline std::pair<bool,vector_t*> make(std::pair<T*,std::size_t> v, const bool is_const = false) { return std::make_pair(is_const, new vector_t(v.first, v.second)); } }; struct tie_stdvec { template <typename Allocator> static inline std::pair<bool,vector_t*> make(std::vector<T,Allocator>& v, const bool is_const = false) { return std::make_pair(is_const, new vector_t(v)); } }; struct tie_vecview { static inline std::pair<bool,vector_t*> make(exprtk::vector_view<T>& v, const bool is_const = false) { return std::make_pair(is_const, new vector_t(v)); } }; struct tie_stddeq { template <typename Allocator> static inline 
std::pair<bool,vector_t*> make(std::deque<T,Allocator>& v, const bool is_const = false) { return std::make_pair(is_const, new vector_t(v)); } }; template <std::size_t v_size> inline bool add(const std::string& symbol_name, T (&v)[v_size], const bool is_const = false) { return add_impl<tie_array,std::pair<T*,std::size_t> > (symbol_name, std::make_pair(v,v_size), is_const); } inline bool add(const std::string& symbol_name, T* v, const std::size_t v_size, const bool is_const = false) { return add_impl<tie_array,std::pair<T*,std::size_t> > (symbol_name, std::make_pair(v,v_size), is_const); } template <typename Allocator> inline bool add(const std::string& symbol_name, std::vector<T,Allocator>& v, const bool is_const = false) { return add_impl<tie_stdvec,std::vector<T,Allocator>&> (symbol_name, v, is_const); } inline bool add(const std::string& symbol_name, exprtk::vector_view<T>& v, const bool is_const = false) { return add_impl<tie_vecview,exprtk::vector_view<T>&> (symbol_name, v, is_const); } template <typename Allocator> inline bool add(const std::string& symbol_name, std::deque<T,Allocator>& v, const bool is_const = false) { return add_impl<tie_stddeq,std::deque<T,Allocator>&> (symbol_name, v, is_const); } inline bool add(const std::string& symbol_name, RawType& t, const bool is_const = false) { struct tie { static inline std::pair<bool,variable_node_t*> make(T& t,const bool is_const = false) { return std::make_pair(is_const, new variable_node_t(t)); } #ifndef exprtk_disable_string_capabilities static inline std::pair<bool,stringvar_node_t*> make(std::string& t,const bool is_const = false) { return std::make_pair(is_const, new stringvar_node_t(t)); } #endif static inline std::pair<bool,function_t*> make(function_t& t, const bool is_constant = false) { return std::make_pair(is_constant,&t); } static inline std::pair<bool,vararg_function_t*> make(vararg_function_t& t, const bool is_const = false) { return std::make_pair(is_const,&t); } static inline 
std::pair<bool,generic_function_t*> make(generic_function_t& t, const bool is_constant = false) { return std::make_pair(is_constant,&t); } }; const tm_itr_t itr = map.find(symbol_name); if (map.end() == itr) { map[symbol_name] = tie::make(t,is_const); ++size; } return true; } inline type_ptr get(const std::string& symbol_name) const { const tm_const_itr_t itr = map.find(symbol_name); if (map.end() == itr) return reinterpret_cast<type_ptr>(0); else return itr->second.second; } template <typename TType, typename TRawType, typename PtrType> struct ptr_match { static inline bool test(const PtrType, const void*) { return false; } }; template <typename TType, typename TRawType> struct ptr_match<TType,TRawType,variable_node_t*> { static inline bool test(const variable_node_t* p, const void* ptr) { exprtk_debug(("ptr_match::test() - %p <--> %p\n",(void*)(&(p->ref())),ptr)); return (&(p->ref()) == ptr); } }; inline type_ptr get_from_varptr(const void* ptr) const { tm_const_itr_t itr = map.begin(); while (map.end() != itr) { type_ptr ret_ptr = itr->second.second; if (ptr_match<Type,RawType,type_ptr>::test(ret_ptr,ptr)) { return ret_ptr; } ++itr; } return type_ptr(0); } inline bool remove(const std::string& symbol_name, const bool delete_node = true) { const tm_itr_t itr = map.find(symbol_name); if (map.end() != itr) { struct deleter { static inline void process(std::pair<bool,variable_node_t*>& n) { delete n.second; } static inline void process(std::pair<bool,vector_t*>& n) { delete n.second; } #ifndef exprtk_disable_string_capabilities static inline void process(std::pair<bool,stringvar_node_t*>& n) { delete n.second; } #endif static inline void process(std::pair<bool,function_t*>&) { } }; if (delete_node) { deleter::process((*itr).second); } map.erase(itr); --size; return true; } else return false; } inline RawType& type_ref(const std::string& symbol_name) { struct init_type { static inline double set(double) { return (0.0); } static inline double set(long double) { return 
(0.0); } static inline float set(float) { return (0.0f); } static inline std::string set(std::string) { return std::string(""); } }; static RawType null_type = init_type::set(RawType()); const tm_const_itr_t itr = map.find(symbol_name); if (map.end() == itr) return null_type; else return itr->second.second->ref(); } inline void clear(const bool delete_node = true) { struct deleter { static inline void process(std::pair<bool,variable_node_t*>& n) { delete n.second; } static inline void process(std::pair<bool,vector_t*>& n) { delete n.second; } static inline void process(std::pair<bool,function_t*>&) { } #ifndef exprtk_disable_string_capabilities static inline void process(std::pair<bool,stringvar_node_t*>& n) { delete n.second; } #endif }; if (!map.empty()) { if (delete_node) { tm_itr_t itr = map.begin(); tm_itr_t end = map.end (); while (end != itr) { deleter::process((*itr).second); ++itr; } } map.clear(); } size = 0; } template <typename Allocator, template <typename, typename> class Sequence> inline std::size_t get_list(Sequence<std::pair<std::string,RawType>,Allocator>& list) const { std::size_t count = 0; if (!map.empty()) { tm_const_itr_t itr = map.begin(); tm_const_itr_t end = map.end (); while (end != itr) { list.push_back(std::make_pair((*itr).first,itr->second.second->ref())); ++itr; ++count; } } return count; } template <typename Allocator, template <typename, typename> class Sequence> inline std::size_t get_list(Sequence<std::string,Allocator>& vlist) const { std::size_t count = 0; if (!map.empty()) { tm_const_itr_t itr = map.begin(); tm_const_itr_t end = map.end (); while (end != itr) { vlist.push_back((*itr).first); ++itr; ++count; } } return count; } }; typedef details::expression_node<T>* expression_ptr; typedef typename details::variable_node<T> variable_t; typedef typename details::vector_holder<T> vector_holder_t; typedef variable_t* variable_ptr; #ifndef exprtk_disable_string_capabilities typedef typename details::stringvar_node<T> stringvar_t; 
typedef stringvar_t* stringvar_ptr; #endif typedef ifunction <T> function_t; typedef ivararg_function <T> vararg_function_t; typedef igeneric_function<T> generic_function_t; typedef function_t* function_ptr; typedef vararg_function_t* vararg_function_ptr; typedef generic_function_t* generic_function_ptr; static const std::size_t lut_size = 256; // Symbol Table Holder struct control_block { struct st_data { type_store<typename details::variable_node<T>,T> variable_store; #ifndef exprtk_disable_string_capabilities type_store<typename details::stringvar_node<T>,std::string> stringvar_store; #endif type_store<ifunction<T>,ifunction<T> > function_store; type_store<ivararg_function <T>,ivararg_function <T> > vararg_function_store; type_store<igeneric_function<T>,igeneric_function<T> > generic_function_store; type_store<igeneric_function<T>,igeneric_function<T> > string_function_store; type_store<vector_holder_t,vector_holder_t> vector_store; st_data() { for (std::size_t i = 0; i < details::reserved_words_size; ++i) { reserved_symbol_table_.insert(details::reserved_words[i]); } for (std::size_t i = 0; i < details::reserved_symbols_size; ++i) { reserved_symbol_table_.insert(details::reserved_symbols[i]); } } ~st_data() { for (std::size_t i = 0; i < free_function_list_.size(); ++i) { delete free_function_list_[i]; } } inline bool is_reserved_symbol(const std::string& symbol) const { return (reserved_symbol_table_.end() != reserved_symbol_table_.find(symbol)); } static inline st_data* create() { return (new st_data); } static inline void destroy(st_data*& sd) { delete sd; sd = reinterpret_cast<st_data*>(0); } std::list<T> local_symbol_list_; std::list<std::string> local_stringvar_list_; std::set<std::string> reserved_symbol_table_; std::vector<ifunction<T>*> free_function_list_; }; control_block() : ref_count(1), data_(st_data::create()) {} control_block(st_data* data) : ref_count(1), data_(data) {} ~control_block() { if (data_ && (0 == ref_count)) { st_data::destroy(data_); 
} } static inline control_block* create() { return (new control_block); } template <typename SymTab> static inline void destroy(control_block*& cntrl_blck, SymTab* sym_tab) { if (cntrl_blck) { if ( (0 != cntrl_blck->ref_count) && (0 == --cntrl_blck->ref_count) ) { if (sym_tab) sym_tab->clear(); delete cntrl_blck; } cntrl_blck = 0; } } std::size_t ref_count; st_data* data_; }; public: symbol_table() : control_block_(control_block::create()) { clear(); } ~symbol_table() { control_block::destroy(control_block_,this); } symbol_table(const symbol_table<T>& st) { control_block_ = st.control_block_; control_block_->ref_count++; } inline symbol_table<T>& operator=(const symbol_table<T>& st) { if (this != &st) { control_block::destroy(control_block_,reinterpret_cast<symbol_table<T>*>(0)); control_block_ = st.control_block_; control_block_->ref_count++; } return (*this); } inline bool operator==(const symbol_table<T>& st) { return (this == &st) || (control_block_ == st.control_block_); } inline void clear_variables(const bool delete_node = true) { local_data().variable_store.clear(delete_node); } inline void clear_functions() { local_data().function_store.clear(); } inline void clear_strings() { #ifndef exprtk_disable_string_capabilities local_data().stringvar_store.clear(); #endif } inline void clear_vectors() { local_data().vector_store.clear(); } inline void clear_local_constants() { local_data().local_symbol_list_.clear(); } inline void clear() { if (!valid()) return; clear_variables (); clear_functions (); clear_strings (); clear_vectors (); clear_local_constants(); } inline std::size_t variable_count() const { if (valid()) return local_data().variable_store.size; else return 0; } #ifndef exprtk_disable_string_capabilities inline std::size_t stringvar_count() const { if (valid()) return local_data().stringvar_store.size; else return 0; } #endif inline std::size_t function_count() const { if (valid()) return local_data().function_store.size; else return 0; } inline 
std::size_t vector_count() const { if (valid()) return local_data().vector_store.size; else return 0; } inline variable_ptr get_variable(const std::string& variable_name) const { if (!valid()) return reinterpret_cast<variable_ptr>(0); else if (!valid_symbol(variable_name)) return reinterpret_cast<variable_ptr>(0); else return local_data().variable_store.get(variable_name); } inline variable_ptr get_variable(const T& var_ref) const { if (!valid()) return reinterpret_cast<variable_ptr>(0); else return local_data().variable_store.get_from_varptr( reinterpret_cast<const void*>(&var_ref)); } #ifndef exprtk_disable_string_capabilities inline stringvar_ptr get_stringvar(const std::string& string_name) const { if (!valid()) return reinterpret_cast<stringvar_ptr>(0); else if (!valid_symbol(string_name)) return reinterpret_cast<stringvar_ptr>(0); else return local_data().stringvar_store.get(string_name); } #endif inline function_ptr get_function(const std::string& function_name) const { if (!valid()) return reinterpret_cast<function_ptr>(0); else if (!valid_symbol(function_name)) return reinterpret_cast<function_ptr>(0); else return local_data().function_store.get(function_name); } inline vararg_function_ptr get_vararg_function(const std::string& vararg_function_name) const { if (!valid()) return reinterpret_cast<vararg_function_ptr>(0); else if (!valid_symbol(vararg_function_name)) return reinterpret_cast<vararg_function_ptr>(0); else return local_data().vararg_function_store.get(vararg_function_name); } inline generic_function_ptr get_generic_function(const std::string& function_name) const { if (!valid()) return reinterpret_cast<generic_function_ptr>(0); else if (!valid_symbol(function_name)) return reinterpret_cast<generic_function_ptr>(0); else return local_data().generic_function_store.get(function_name); } inline generic_function_ptr get_string_function(const std::string& function_name) const { if (!valid()) return reinterpret_cast<generic_function_ptr>(0); else if 
(!valid_symbol(function_name)) return reinterpret_cast<generic_function_ptr>(0); else return local_data().string_function_store.get(function_name); } typedef vector_holder_t* vector_holder_ptr; inline vector_holder_ptr get_vector(const std::string& vector_name) const { if (!valid()) return reinterpret_cast<vector_holder_ptr>(0); else if (!valid_symbol(vector_name)) return reinterpret_cast<vector_holder_ptr>(0); else return local_data().vector_store.get(vector_name); } inline T& variable_ref(const std::string& symbol_name) { static T null_var = T(0); if (!valid()) return null_var; else if (!valid_symbol(symbol_name)) return null_var; else return local_data().variable_store.type_ref(symbol_name); } #ifndef exprtk_disable_string_capabilities inline std::string& stringvar_ref(const std::string& symbol_name) { static std::string null_stringvar; if (!valid()) return null_stringvar; else if (!valid_symbol(symbol_name)) return null_stringvar; else return local_data().stringvar_store.type_ref(symbol_name); } #endif inline bool is_constant_node(const std::string& symbol_name) const { if (!valid()) return false; else if (!valid_symbol(symbol_name)) return false; else return local_data().variable_store.is_constant(symbol_name); } #ifndef exprtk_disable_string_capabilities inline bool is_constant_string(const std::string& symbol_name) const { if (!valid()) return false; else if (!valid_symbol(symbol_name)) return false; else if (!local_data().stringvar_store.symbol_exists(symbol_name)) return false; else return local_data().stringvar_store.is_constant(symbol_name); } #endif inline bool create_variable(const std::string& variable_name, const T& value = T(0)) { if (!valid()) return false; else if (!valid_symbol(variable_name)) return false; else if (symbol_exists(variable_name)) return false; local_data().local_symbol_list_.push_back(value); T& t = local_data().local_symbol_list_.back(); return add_variable(variable_name,t); } #ifndef exprtk_disable_string_capabilities inline 
bool create_stringvar(const std::string& stringvar_name, const std::string& value = std::string("")) { if (!valid()) return false; else if (!valid_symbol(stringvar_name)) return false; else if (symbol_exists(stringvar_name)) return false; local_data().local_stringvar_list_.push_back(value); std::string& s = local_data().local_stringvar_list_.back(); return add_stringvar(stringvar_name,s); } #endif inline bool add_raw_variable(const std::string& variable_name, T& t, const bool is_constant = false) { if (!valid()) return false; else if (symbol_exists(variable_name)) return false; else return local_data().variable_store.add(variable_name,t,is_constant); } inline bool add_variable(const std::string& variable_name, T& t, const bool is_constant = false) { if (!valid()) return false; else if (!valid_symbol(variable_name)) return false; else if (symbol_exists(variable_name)) return false; else return local_data().variable_store.add(variable_name,t,is_constant); } inline bool add_constant(const std::string& constant_name, const T& value) { if (!valid()) return false; else if (!valid_symbol(constant_name)) return false; else if (symbol_exists(constant_name)) return false; local_data().local_symbol_list_.push_back(value); T& t = local_data().local_symbol_list_.back(); return add_variable(constant_name,t,true); } #ifndef exprtk_disable_string_capabilities inline bool add_stringvar(const std::string& stringvar_name, std::string& s, const bool is_constant = false) { if (!valid()) return false; else if (!valid_symbol(stringvar_name)) return false; else if (symbol_exists(stringvar_name)) return false; else return local_data().stringvar_store.add(stringvar_name,s,is_constant); } #endif inline bool add_function(const std::string& function_name, function_t& function) { if (!valid()) return false; else if (!valid_symbol(function_name)) return false; else if (symbol_exists(function_name)) return false; else return local_data().function_store.add(function_name,function); } inline bool 
add_function(const std::string& vararg_function_name, vararg_function_t& vararg_function) { if (!valid()) return false; else if (!valid_symbol(vararg_function_name)) return false; else if (symbol_exists(vararg_function_name)) return false; else return local_data().vararg_function_store.add(vararg_function_name,vararg_function); } inline bool add_function(const std::string& function_name, generic_function_t& function) { if (!valid()) return false; else if (!valid_symbol(function_name)) return false; else if (symbol_exists(function_name)) return false; else if (std::string::npos != function.parameter_sequence.find_first_not_of("STVZ*?|")) return false; else if (generic_function_t::e_rtrn_scalar == function.rtrn_type) return local_data().generic_function_store.add(function_name,function); else if (generic_function_t::e_rtrn_string == function.rtrn_type) return local_data().string_function_store.add(function_name, function); else return false; } #define exprtk_define_freefunction(NN) \ inline bool add_function(const std::string& function_name, ff##NN##_functor function) \ { \ if (!valid()) \ { return false; } \ if (!valid_symbol(function_name)) \ { return false; } \ if (symbol_exists(function_name)) \ { return false; } \ \ exprtk::ifunction<T>* ifunc = new freefunc##NN(function); \ \ local_data().free_function_list_.push_back(ifunc); \ \ return add_function(function_name,(*local_data().free_function_list_.back())); \ } \ exprtk_define_freefunction(00) exprtk_define_freefunction(01) exprtk_define_freefunction(02) exprtk_define_freefunction(03) exprtk_define_freefunction(04) exprtk_define_freefunction(05) exprtk_define_freefunction(06) exprtk_define_freefunction(07) exprtk_define_freefunction(08) exprtk_define_freefunction(09) exprtk_define_freefunction(10) exprtk_define_freefunction(11) exprtk_define_freefunction(12) exprtk_define_freefunction(13) exprtk_define_freefunction(14) exprtk_define_freefunction(15) #undef exprtk_define_freefunction inline bool 
add_reserved_function(const std::string& function_name, function_t& function) { if (!valid()) return false; else if (!valid_symbol(function_name,false)) return false; else if (symbol_exists(function_name,false)) return false; else return local_data().function_store.add(function_name,function); } inline bool add_reserved_function(const std::string& vararg_function_name, vararg_function_t& vararg_function) { if (!valid()) return false; else if (!valid_symbol(vararg_function_name,false)) return false; else if (symbol_exists(vararg_function_name,false)) return false; else return local_data().vararg_function_store.add(vararg_function_name,vararg_function); } inline bool add_reserved_function(const std::string& function_name, generic_function_t& function) { if (!valid()) return false; else if (!valid_symbol(function_name,false)) return false; else if (symbol_exists(function_name,false)) return false; else if (std::string::npos != function.parameter_sequence.find_first_not_of("STV*?|")) return false; else if (generic_function_t::e_rtrn_scalar == function.rtrn_type) return local_data().generic_function_store.add(function_name,function); else if (generic_function_t::e_rtrn_string == function.rtrn_type) return local_data().string_function_store.add(function_name, function); else return false; } template <std::size_t N> inline bool add_vector(const std::string& vector_name, T (&v)[N]) { if (!valid()) return false; else if (!valid_symbol(vector_name)) return false; else if (symbol_exists(vector_name)) return false; else return local_data().vector_store.add(vector_name,v); } inline bool add_vector(const std::string& vector_name, T* v, const std::size_t& v_size) { if (!valid()) return false; else if (!valid_symbol(vector_name)) return false; else if (symbol_exists(vector_name)) return false; else if (0 == v_size) return false; else return local_data().vector_store.add(vector_name,v,v_size); } template <typename Allocator> inline bool add_vector(const std::string& vector_name, 
std::vector<T,Allocator>& v) { if (!valid()) return false; else if (!valid_symbol(vector_name)) return false; else if (symbol_exists(vector_name)) return false; else if (0 == v.size()) return false; else return local_data().vector_store.add(vector_name,v); } inline bool add_vector(const std::string& vector_name, exprtk::vector_view<T>& v) { if (!valid()) return false; else if (!valid_symbol(vector_name)) return false; else if (symbol_exists(vector_name)) return false; else if (0 == v.size()) return false; else return local_data().vector_store.add(vector_name,v); } inline bool remove_variable(const std::string& variable_name, const bool delete_node = true) { if (!valid()) return false; else return local_data().variable_store.remove(variable_name, delete_node); } #ifndef exprtk_disable_string_capabilities inline bool remove_stringvar(const std::string& string_name) { if (!valid()) return false; else return local_data().stringvar_store.remove(string_name); } #endif inline bool remove_function(const std::string& function_name) { if (!valid()) return false; else return local_data().function_store.remove(function_name); } inline bool remove_vararg_function(const std::string& vararg_function_name) { if (!valid()) return false; else return local_data().vararg_function_store.remove(vararg_function_name); } inline bool remove_vector(const std::string& vector_name) { if (!valid()) return false; else return local_data().vector_store.remove(vector_name); } inline bool add_constants() { return add_pi () && add_epsilon () && add_infinity() ; } inline bool add_pi() { const typename details::numeric::details::number_type<T>::type num_type; static const T local_pi = details::numeric::details::const_pi_impl<T>(num_type); return add_constant("pi",local_pi); } inline bool add_epsilon() { static const T local_epsilon = details::numeric::details::epsilon_type<T>::value(); return add_constant("epsilon",local_epsilon); } inline bool add_infinity() { static const T local_infinity = 
std::numeric_limits<T>::infinity(); return add_constant("inf",local_infinity); } template <typename Package> inline bool add_package(Package& package) { return package.register_package(*this); } template <typename Allocator, template <typename, typename> class Sequence> inline std::size_t get_variable_list(Sequence<std::pair<std::string,T>,Allocator>& vlist) const { if (!valid()) return 0; else return local_data().variable_store.get_list(vlist); } template <typename Allocator, template <typename, typename> class Sequence> inline std::size_t get_variable_list(Sequence<std::string,Allocator>& vlist) const { if (!valid()) return 0; else return local_data().variable_store.get_list(vlist); } #ifndef exprtk_disable_string_capabilities template <typename Allocator, template <typename, typename> class Sequence> inline std::size_t get_stringvar_list(Sequence<std::pair<std::string,std::string>,Allocator>& svlist) const { if (!valid()) return 0; else return local_data().stringvar_store.get_list(svlist); } template <typename Allocator, template <typename, typename> class Sequence> inline std::size_t get_stringvar_list(Sequence<std::string,Allocator>& svlist) const { if (!valid()) return 0; else return local_data().stringvar_store.get_list(svlist); } #endif template <typename Allocator, template <typename, typename> class Sequence> inline std::size_t get_vector_list(Sequence<std::string,Allocator>& vlist) const { if (!valid()) return 0; else return local_data().vector_store.get_list(vlist); } inline bool symbol_exists(const std::string& symbol_name, const bool check_reserved_symb = true) const { /* Function will return true if symbol_name exists as either a reserved symbol, variable, stringvar, vector or function name in any of the type stores. 
*/ if (!valid()) return false; else if (local_data().variable_store.symbol_exists(symbol_name)) return true; #ifndef exprtk_disable_string_capabilities else if (local_data().stringvar_store.symbol_exists(symbol_name)) return true; #endif else if (local_data().vector_store.symbol_exists(symbol_name)) return true; else if (local_data().function_store.symbol_exists(symbol_name)) return true; else if (check_reserved_symb && local_data().is_reserved_symbol(symbol_name)) return true; else return false; } inline bool is_variable(const std::string& variable_name) const { if (!valid()) return false; else return local_data().variable_store.symbol_exists(variable_name); } #ifndef exprtk_disable_string_capabilities inline bool is_stringvar(const std::string& stringvar_name) const { if (!valid()) return false; else return local_data().stringvar_store.symbol_exists(stringvar_name); } inline bool is_conststr_stringvar(const std::string& symbol_name) const { if (!valid()) return false; else if (!valid_symbol(symbol_name)) return false; else if (!local_data().stringvar_store.symbol_exists(symbol_name)) return false; return ( local_data().stringvar_store.symbol_exists(symbol_name) || local_data().stringvar_store.is_constant (symbol_name) ); } #endif inline bool is_function(const std::string& function_name) const { if (!valid()) return false; else return local_data().function_store.symbol_exists(function_name); } inline bool is_vararg_function(const std::string& vararg_function_name) const { if (!valid()) return false; else return local_data().vararg_function_store.symbol_exists(vararg_function_name); } inline bool is_vector(const std::string& vector_name) const { if (!valid()) return false; else return local_data().vector_store.symbol_exists(vector_name); } inline std::string get_variable_name(const expression_ptr& ptr) const { return local_data().variable_store.entity_name(ptr); } inline std::string get_vector_name(const vector_holder_ptr& ptr) const { return 
local_data().vector_store.entity_name(ptr); } #ifndef exprtk_disable_string_capabilities inline std::string get_stringvar_name(const expression_ptr& ptr) const { return local_data().stringvar_store.entity_name(ptr); } inline std::string get_conststr_stringvar_name(const expression_ptr& ptr) const { return local_data().stringvar_store.entity_name(ptr); } #endif inline bool valid() const { // Symbol table sanity check. return control_block_ && control_block_->data_; } inline void load_from(const symbol_table<T>& st) { { std::vector<std::string> name_list; st.local_data().function_store.get_list(name_list); if (!name_list.empty()) { for (std::size_t i = 0; i < name_list.size(); ++i) { exprtk::ifunction<T>& ifunc = *st.get_function(name_list[i]); add_function(name_list[i],ifunc); } } } { std::vector<std::string> name_list; st.local_data().vararg_function_store.get_list(name_list); if (!name_list.empty()) { for (std::size_t i = 0; i < name_list.size(); ++i) { exprtk::ivararg_function<T>& ivafunc = *st.get_vararg_function(name_list[i]); add_function(name_list[i],ivafunc); } } } { std::vector<std::string> name_list; st.local_data().generic_function_store.get_list(name_list); if (!name_list.empty()) { for (std::size_t i = 0; i < name_list.size(); ++i) { exprtk::igeneric_function<T>& ifunc = *st.get_generic_function(name_list[i]); add_function(name_list[i],ifunc); } } } { std::vector<std::string> name_list; st.local_data().string_function_store.get_list(name_list); if (!name_list.empty()) { for (std::size_t i = 0; i < name_list.size(); ++i) { exprtk::igeneric_function<T>& ifunc = *st.get_string_function(name_list[i]); add_function(name_list[i],ifunc); } } } } private: inline bool valid_symbol(const std::string& symbol, const bool check_reserved_symb = true) const { if (symbol.empty()) return false; else if (!details::is_letter(symbol[0])) return false; #if !allow_escaped_symbols else if (symbol.size() > 1) { bool escaped = false; for (std::size_t i = 1; i < symbol.size(); 
++i) { if ('\\' == symbol[i]) escaped = true; else if (escaped) escaped = false; else if ( !details::is_letter_or_digit(symbol[i]) && ('_' != symbol[i]) ) { if (('.' == symbol[i]) && (i < (symbol.size() - 1))) continue; else return false; } } } #endif return (check_reserved_symb) ? (!local_data().is_reserved_symbol(symbol)) : true; } inline bool valid_function(const std::string& symbol) const { if (symbol.empty()) return false; else if (!details::is_letter(symbol[0])) return false; else if (symbol.size() > 1) { for (std::size_t i = 1; i < symbol.size(); ++i) { if ( !details::is_letter_or_digit(symbol[i]) && ('_' != symbol[i]) ) { if (('.' == symbol[i]) && (i < (symbol.size() - 1))) continue; else return false; } } } return true; } typedef typename control_block::st_data local_data_t; inline local_data_t& local_data() { return *(control_block_->data_); } inline const local_data_t& local_data() const { return *(control_block_->data_); } control_block* control_block_; friend class parser<T>; }; template <typename T> class function_compositor; template <typename T> class expression { private: typedef details::expression_node<T>* expression_ptr; typedef details::vector_holder<T>* vector_holder_ptr; typedef std::vector<symbol_table<T> > symtab_list_t; struct control_block { enum data_type { e_unknown , e_expr , e_vecholder, e_data , e_vecdata , e_string }; struct data_pack { data_pack() : pointer(0), type(e_unknown), size(0) {} data_pack(void* ptr, const data_type dt, const std::size_t sz = 0) : pointer(ptr), type(dt), size(sz) {} void* pointer; data_type type; std::size_t size; }; typedef std::vector<data_pack> local_data_list_t; typedef results_context<T> results_context_t; control_block() : ref_count(0), expr (0), results (0), retinv_null(false), return_invoked(&retinv_null) {} control_block(expression_ptr e) : ref_count(1), expr (e), results (0), retinv_null(false), return_invoked(&retinv_null) {} ~control_block() { if (expr && details::branch_deletable(expr)) { 
destroy_node(expr); } if (!local_data_list.empty()) { for (std::size_t i = 0; i < local_data_list.size(); ++i) { switch (local_data_list[i].type) { case e_expr : delete reinterpret_cast<expression_ptr>(local_data_list[i].pointer); break; case e_vecholder : delete reinterpret_cast<vector_holder_ptr>(local_data_list[i].pointer); break; case e_data : delete (T*)(local_data_list[i].pointer); break; case e_vecdata : delete [] (T*)(local_data_list[i].pointer); break; case e_string : delete (std::string*)(local_data_list[i].pointer); break; default : break; } } } if (results) { delete results; } } static inline control_block* create(expression_ptr e) { return new control_block(e); } static inline void destroy(control_block*& cntrl_blck) { if (cntrl_blck) { if ( (0 != cntrl_blck->ref_count) && (0 == --cntrl_blck->ref_count) ) { delete cntrl_blck; } cntrl_blck = 0; } } std::size_t ref_count; expression_ptr expr; local_data_list_t local_data_list; results_context_t* results; bool retinv_null; bool* return_invoked; friend class function_compositor<T>; }; public: expression() : control_block_(0) { set_expression(new details::null_node<T>()); } expression(const expression<T>& e) : control_block_ (e.control_block_ ), symbol_table_list_(e.symbol_table_list_) { control_block_->ref_count++; } expression(const symbol_table<T>& symbol_table) : control_block_(0) { set_expression(new details::null_node<T>()); symbol_table_list_.push_back(symbol_table); } inline expression<T>& operator=(const expression<T>& e) { if (this != &e) { if (control_block_) { if ( (0 != control_block_->ref_count) && (0 == --control_block_->ref_count) ) { delete control_block_; } control_block_ = 0; } control_block_ = e.control_block_; control_block_->ref_count++; symbol_table_list_ = e.symbol_table_list_; } return *this; } inline bool operator==(const expression<T>& e) { return (this == &e); } inline bool operator!() const { return ( (0 == control_block_ ) || (0 == control_block_->expr) ); } inline 
expression<T>& release() { control_block::destroy(control_block_); return (*this); } ~expression() { control_block::destroy(control_block_); } inline T value() const { return control_block_->expr->value(); } inline T operator() () const { return value(); } inline operator T() const { return value(); } inline operator bool() const { return details::is_true(value()); } inline void register_symbol_table(symbol_table<T>& st) { symbol_table_list_.push_back(st); } inline const symbol_table<T>& get_symbol_table(const std::size_t& index = 0) const { return symbol_table_list_[index]; } inline symbol_table<T>& get_symbol_table(const std::size_t& index = 0) { return symbol_table_list_[index]; } typedef results_context<T> results_context_t; inline const results_context_t& results() const { if (control_block_->results) return (*control_block_->results); else { static const results_context_t null_results; return null_results; } } inline bool return_invoked() const { return (*control_block_->return_invoked); } private: inline symtab_list_t get_symbol_table_list() const { return symbol_table_list_; } inline void set_expression(const expression_ptr expr) { if (expr) { if (control_block_) { if (0 == --control_block_->ref_count) { delete control_block_; } } control_block_ = control_block::create(expr); } } inline void register_local_var(expression_ptr expr) { if (expr) { if (control_block_) { control_block_-> local_data_list.push_back( typename expression<T>::control_block:: data_pack(reinterpret_cast<void*>(expr), control_block::e_expr)); } } } inline void register_local_var(vector_holder_ptr vec_holder) { if (vec_holder) { if (control_block_) { control_block_-> local_data_list.push_back( typename expression<T>::control_block:: data_pack(reinterpret_cast<void*>(vec_holder), control_block::e_vecholder)); } } } inline void register_local_data(void* data, const std::size_t& size = 0, const std::size_t data_mode = 0) { if (data) { if (control_block_) { typename control_block::data_type 
dt = control_block::e_data; switch (data_mode) { case 0 : dt = control_block::e_data; break; case 1 : dt = control_block::e_vecdata; break; case 2 : dt = control_block::e_string; break; } control_block_-> local_data_list.push_back( typename expression<T>::control_block:: data_pack(reinterpret_cast<void*>(data), dt, size)); } } } inline const typename control_block::local_data_list_t& local_data_list() { if (control_block_) { return control_block_->local_data_list; } else { static typename control_block::local_data_list_t null_local_data_list; return null_local_data_list; } } inline void register_return_results(results_context_t* rc) { if (control_block_ && rc) { control_block_->results = rc; } } inline void set_retinvk(bool* retinvk_ptr) { if (control_block_) { control_block_->return_invoked = retinvk_ptr; } } control_block* control_block_; symtab_list_t symbol_table_list_; friend class parser<T>; friend class expression_helper<T>; friend class function_compositor<T>; }; template <typename T> class expression_helper { public: static inline bool is_constant(const expression<T>& expr) { return details::is_constant_node(expr.control_block_->expr); } static inline bool is_variable(const expression<T>& expr) { return details::is_variable_node(expr.control_block_->expr); } static inline bool is_unary(const expression<T>& expr) { return details::is_unary_node(expr.control_block_->expr); } static inline bool is_binary(const expression<T>& expr) { return details::is_binary_node(expr.control_block_->expr); } static inline bool is_function(const expression<T>& expr) { return details::is_function(expr.control_block_->expr); } static inline bool is_null(const expression<T>& expr) { return details::is_null_node(expr.control_block_->expr); } }; template <typename T> inline bool is_valid(const expression<T>& expr) { return !expression_helper<T>::is_null(expr); } namespace parser_error { enum error_mode { e_unknown = 0, e_syntax = 1, e_token = 2, e_numeric = 4, e_symtab = 5, 
e_lexer = 6, e_helper = 7 }; struct type { type() : mode(parser_error::e_unknown), line_no (0), column_no(0) {} lexer::token token; error_mode mode; std::string diagnostic; std::string src_location; std::string error_line; std::size_t line_no; std::size_t column_no; }; inline type make_error(const error_mode mode, const std::string& diagnostic = "", const std::string& src_location = "") { type t; t.mode = mode; t.token.type = lexer::token::e_error; t.diagnostic = diagnostic; t.src_location = src_location; exprtk_debug(("%s\n",diagnostic .c_str())); return t; } inline type make_error(const error_mode mode, const lexer::token& tk, const std::string& diagnostic = "", const std::string& src_location = "") { type t; t.mode = mode; t.token = tk; t.diagnostic = diagnostic; t.src_location = src_location; exprtk_debug(("%s\n",diagnostic .c_str())); return t; } inline std::string to_str(error_mode mode) { switch (mode) { case e_unknown : return std::string("Unknown Error"); case e_syntax : return std::string("Syntax Error" ); case e_token : return std::string("Token Error" ); case e_numeric : return std::string("Numeric Error"); case e_symtab : return std::string("Symbol Error" ); case e_lexer : return std::string("Lexer Error" ); case e_helper : return std::string("Helper Error" ); default : return std::string("Unknown Error"); } } inline bool update_error(type& error, const std::string& expression) { if ( expression.empty() || (error.token.position > expression.size()) || (std::numeric_limits<std::size_t>::max() == error.token.position) ) { return false; } std::size_t error_line_start = 0; for (std::size_t i = error.token.position; i > 0; --i) { const details::char_t c = expression[i]; if (('\n' == c) || ('\r' == c)) { error_line_start = i + 1; break; } } std::size_t next_nl_position = std::min(expression.size(), expression.find_first_of('\n',error.token.position + 1)); error.column_no = error.token.position - error_line_start; error.error_line = 
expression.substr(error_line_start, next_nl_position - error_line_start); error.line_no = 0; for (std::size_t i = 0; i < next_nl_position; ++i) { if ('\n' == expression[i]) ++error.line_no; } return true; } inline void dump_error(const type& error) { printf("Position: %02d Type: [%s] Msg: %s\n", static_cast<int>(error.token.position), exprtk::parser_error::to_str(error.mode).c_str(), error.diagnostic.c_str()); } } namespace details { template <typename Parser> inline void disable_type_checking(Parser& p) { p.state_.type_check_enabled = false; } } template <typename T> class parser : public lexer::parser_helper { private: enum precedence_level { e_level00, e_level01, e_level02, e_level03, e_level04, e_level05, e_level06, e_level07, e_level08, e_level09, e_level10, e_level11, e_level12, e_level13, e_level14 }; typedef const T& cref_t; typedef const T const_t; typedef ifunction <T> F; typedef ivararg_function <T> VAF; typedef igeneric_function <T> GF; typedef ifunction <T> ifunction_t; typedef ivararg_function <T> ivararg_function_t; typedef igeneric_function <T> igeneric_function_t; typedef details::expression_node <T> expression_node_t; typedef details::literal_node <T> literal_node_t; typedef details::unary_node <T> unary_node_t; typedef details::binary_node <T> binary_node_t; typedef details::trinary_node <T> trinary_node_t; typedef details::quaternary_node <T> quaternary_node_t; typedef details::conditional_node<T> conditional_node_t; typedef details::cons_conditional_node<T> cons_conditional_node_t; typedef details::while_loop_node <T> while_loop_node_t; typedef details::repeat_until_loop_node<T> repeat_until_loop_node_t; typedef details::for_loop_node <T> for_loop_node_t; #ifndef exprtk_disable_break_continue typedef details::while_loop_bc_node <T> while_loop_bc_node_t; typedef details::repeat_until_loop_bc_node<T> repeat_until_loop_bc_node_t; typedef details::for_loop_bc_node<T> for_loop_bc_node_t; #endif typedef details::switch_node <T> switch_node_t; typedef 
details::variable_node <T> variable_node_t; typedef details::vector_elem_node<T> vector_elem_node_t; typedef details::rebasevector_elem_node<T> rebasevector_elem_node_t; typedef details::rebasevector_celem_node<T> rebasevector_celem_node_t; typedef details::vector_node <T> vector_node_t; typedef details::range_pack <T> range_t; #ifndef exprtk_disable_string_capabilities typedef details::stringvar_node <T> stringvar_node_t; typedef details::string_literal_node<T> string_literal_node_t; typedef details::string_range_node <T> string_range_node_t; typedef details::const_string_range_node<T> const_string_range_node_t; typedef details::generic_string_range_node<T> generic_string_range_node_t; typedef details::string_concat_node <T> string_concat_node_t; typedef details::assignment_string_node<T> assignment_string_node_t; typedef details::assignment_string_range_node<T> assignment_string_range_node_t; typedef details::conditional_string_node<T> conditional_string_node_t; typedef details::cons_conditional_str_node<T> cons_conditional_str_node_t; #endif typedef details::assignment_node<T> assignment_node_t; typedef details::assignment_vec_elem_node <T> assignment_vec_elem_node_t; typedef details::assignment_rebasevec_elem_node <T> assignment_rebasevec_elem_node_t; typedef details::assignment_rebasevec_celem_node<T> assignment_rebasevec_celem_node_t; typedef details::assignment_vec_node <T> assignment_vec_node_t; typedef details::assignment_vecvec_node <T> assignment_vecvec_node_t; typedef details::scand_node<T> scand_node_t; typedef details::scor_node<T> scor_node_t; typedef lexer::token token_t; typedef expression_node_t* expression_node_ptr; typedef expression<T> expression_t; typedef symbol_table<T> symbol_table_t; typedef typename expression<T>::symtab_list_t symbol_table_list_t; typedef details::vector_holder<T>* vector_holder_ptr; typedef typename details::functor_t<T> functor_t; typedef typename functor_t::qfunc_t quaternary_functor_t; typedef typename 
functor_t::tfunc_t trinary_functor_t; typedef typename functor_t::bfunc_t binary_functor_t; typedef typename functor_t::ufunc_t unary_functor_t; typedef details::operator_type operator_t; typedef std::map<operator_t, unary_functor_t> unary_op_map_t; typedef std::map<operator_t, binary_functor_t> binary_op_map_t; typedef std::map<operator_t,trinary_functor_t> trinary_op_map_t; typedef std::map<std::string,std::pair<trinary_functor_t ,operator_t> > sf3_map_t; typedef std::map<std::string,std::pair<quaternary_functor_t,operator_t> > sf4_map_t; typedef std::map<binary_functor_t,operator_t> inv_binary_op_map_t; typedef std::multimap<std::string,details::base_operation_t,details::ilesscompare> base_ops_map_t; typedef std::set<std::string,details::ilesscompare> disabled_func_set_t; typedef details::T0oT1_define<T, cref_t, cref_t> vov_t; typedef details::T0oT1_define<T, const_t, cref_t> cov_t; typedef details::T0oT1_define<T, cref_t, const_t> voc_t; typedef details::T0oT1oT2_define<T, cref_t, cref_t, cref_t> vovov_t; typedef details::T0oT1oT2_define<T, cref_t, cref_t, const_t> vovoc_t; typedef details::T0oT1oT2_define<T, cref_t, const_t, cref_t> vocov_t; typedef details::T0oT1oT2_define<T, const_t, cref_t, cref_t> covov_t; typedef details::T0oT1oT2_define<T, const_t, cref_t, const_t> covoc_t; typedef details::T0oT1oT2_define<T, const_t, const_t, cref_t> cocov_t; typedef details::T0oT1oT2_define<T, cref_t, const_t, const_t> vococ_t; typedef details::T0oT1oT2oT3_define<T, cref_t, cref_t, cref_t, cref_t> vovovov_t; typedef details::T0oT1oT2oT3_define<T, cref_t, cref_t, cref_t, const_t> vovovoc_t; typedef details::T0oT1oT2oT3_define<T, cref_t, cref_t, const_t, cref_t> vovocov_t; typedef details::T0oT1oT2oT3_define<T, cref_t, const_t, cref_t, cref_t> vocovov_t; typedef details::T0oT1oT2oT3_define<T, const_t, cref_t, cref_t, cref_t> covovov_t; typedef details::T0oT1oT2oT3_define<T, const_t, cref_t, const_t, cref_t> covocov_t; typedef details::T0oT1oT2oT3_define<T, cref_t, 
const_t, cref_t, const_t> vocovoc_t; typedef details::T0oT1oT2oT3_define<T, const_t, cref_t, cref_t, const_t> covovoc_t; typedef details::T0oT1oT2oT3_define<T, cref_t, const_t, const_t, cref_t> vococov_t; typedef results_context<T> results_context_t; typedef parser_helper prsrhlpr_t; struct scope_element { enum element_type { e_none , e_variable, e_vector , e_vecelem , e_string }; typedef details::vector_holder<T> vector_holder_t; typedef variable_node_t* variable_node_ptr; typedef vector_holder_t* vector_holder_ptr; typedef expression_node_t* expression_node_ptr; #ifndef exprtk_disable_string_capabilities typedef stringvar_node_t* stringvar_node_ptr; #endif scope_element() : name("???"), size (std::numeric_limits<std::size_t>::max()), index(std::numeric_limits<std::size_t>::max()), depth(std::numeric_limits<std::size_t>::max()), ref_count(0), ip_index (0), type (e_none), active(false), data (0), var_node(0), vec_node(0) #ifndef exprtk_disable_string_capabilities ,str_node(0) #endif {} bool operator < (const scope_element& se) const { if (ip_index < se.ip_index) return true; else if (ip_index > se.ip_index) return false; else if (depth < se.depth) return true; else if (depth > se.depth) return false; else if (index < se.index) return true; else if (index > se.index) return false; else return (name < se.name); } void clear() { name = "???"; size = std::numeric_limits<std::size_t>::max(); index = std::numeric_limits<std::size_t>::max(); depth = std::numeric_limits<std::size_t>::max(); type = e_none; active = false; ref_count = 0; ip_index = 0; data = 0; var_node = 0; vec_node = 0; #ifndef exprtk_disable_string_capabilities str_node = 0; #endif } std::string name; std::size_t size; std::size_t index; std::size_t depth; std::size_t ref_count; std::size_t ip_index; element_type type; bool active; void* data; expression_node_ptr var_node; vector_holder_ptr vec_node; #ifndef exprtk_disable_string_capabilities stringvar_node_ptr str_node; #endif }; class 
scope_element_manager { public: typedef expression_node_t* expression_node_ptr; typedef variable_node_t* variable_node_ptr; typedef parser<T> parser_t; scope_element_manager(parser<T>& p) : parser_(p), input_param_cnt_(0) {} inline std::size_t size() const { return element_.size(); } inline bool empty() const { return element_.empty(); } inline scope_element& get_element(const std::size_t& index) { if (index < element_.size()) return element_[index]; else return null_element_; } inline scope_element& get_element(const std::string& var_name, const std::size_t index = std::numeric_limits<std::size_t>::max()) { const std::size_t current_depth = parser_.state_.scope_depth; for (std::size_t i = 0; i < element_.size(); ++i) { scope_element& se = element_[i]; if (se.depth > current_depth) continue; else if ( details::imatch(se.name, var_name) && (se.index == index) ) return se; } return null_element_; } inline scope_element& get_active_element(const std::string& var_name, const std::size_t index = std::numeric_limits<std::size_t>::max()) { const std::size_t current_depth = parser_.state_.scope_depth; for (std::size_t i = 0; i < element_.size(); ++i) { scope_element& se = element_[i]; if (se.depth > current_depth) continue; else if ( details::imatch(se.name, var_name) && (se.index == index) && (se.active) ) return se; } return null_element_; } inline bool add_element(const scope_element& se) { for (std::size_t i = 0; i < element_.size(); ++i) { scope_element& cse = element_[i]; if ( details::imatch(cse.name, se.name) && (cse.depth <= se.depth) && (cse.index == se.index) && (cse.size == se.size ) && (cse.type == se.type ) && (cse.active) ) return false; } element_.push_back(se); std::sort(element_.begin(),element_.end()); return true; } inline void deactivate(const std::size_t& scope_depth) { exprtk_debug(("deactivate() - Scope depth: %d\n", static_cast<int>(parser_.state_.scope_depth))); for (std::size_t i = 0; i < element_.size(); ++i) { scope_element& se = element_[i]; 
if (se.active && (se.depth >= scope_depth)) { exprtk_debug(("deactivate() - element[%02d] '%s'\n", static_cast<int>(i), se.name.c_str())); se.active = false; } } } inline void free_element(scope_element& se) { switch (se.type) { case scope_element::e_variable : if (se.data ) delete (T*) se.data; if (se.var_node) delete se.var_node; break; case scope_element::e_vector : if (se.data ) delete[] (T*) se.data; if (se.vec_node) delete se.vec_node; break; case scope_element::e_vecelem : if (se.var_node) delete se.var_node; break; #ifndef exprtk_disable_string_capabilities case scope_element::e_string : if (se.data ) delete (std::string*) se.data; if (se.str_node) delete se.str_node; break; #endif default : return; } se.clear(); } inline void cleanup() { for (std::size_t i = 0; i < element_.size(); ++i) { free_element(element_[i]); } element_.clear(); input_param_cnt_ = 0; } inline std::size_t next_ip_index() { return ++input_param_cnt_; } inline expression_node_ptr get_variable(const T& v) { for (std::size_t i = 0; i < element_.size(); ++i) { scope_element& se = element_[i]; if ( se.active && se.var_node && details::is_variable_node(se.var_node) ) { variable_node_ptr vn = reinterpret_cast<variable_node_ptr>(se.var_node); if (&(vn->ref()) == (&v)) { return se.var_node; } } } return expression_node_ptr(0); } private: scope_element_manager& operator=(const scope_element_manager&); parser_t& parser_; std::vector<scope_element> element_; scope_element null_element_; std::size_t input_param_cnt_; }; class scope_handler { public: typedef parser<T> parser_t; scope_handler(parser<T>& p) : parser_(p) { parser_.state_.scope_depth++; #ifdef exprtk_enable_debugging std::string depth(2 * parser_.state_.scope_depth,'-'); exprtk_debug(("%s> Scope Depth: %02d\n", depth.c_str(), static_cast<int>(parser_.state_.scope_depth))); #endif } ~scope_handler() { parser_.sem_.deactivate(parser_.state_.scope_depth); parser_.state_.scope_depth--; #ifdef exprtk_enable_debugging std::string depth(2 * 
parser_.state_.scope_depth,'-'); exprtk_debug(("<%s Scope Depth: %02d\n", depth.c_str(), static_cast<int>(parser_.state_.scope_depth))); #endif } private: scope_handler& operator=(const scope_handler&); parser_t& parser_; }; struct symtab_store { symbol_table_list_t symtab_list_; typedef typename symbol_table_t::local_data_t local_data_t; typedef typename symbol_table_t::variable_ptr variable_ptr; typedef typename symbol_table_t::function_ptr function_ptr; #ifndef exprtk_disable_string_capabilities typedef typename symbol_table_t::stringvar_ptr stringvar_ptr; #endif typedef typename symbol_table_t::vector_holder_ptr vector_holder_ptr; typedef typename symbol_table_t::vararg_function_ptr vararg_function_ptr; typedef typename symbol_table_t::generic_function_ptr generic_function_ptr; inline bool empty() const { return symtab_list_.empty(); } inline void clear() { symtab_list_.clear(); } inline bool valid() const { if (!empty()) { for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (symtab_list_[i].valid()) return true; } } return false; } inline bool valid_symbol(const std::string& symbol) const { if (!symtab_list_.empty()) return symtab_list_[0].valid_symbol(symbol); else return false; } inline bool valid_function_name(const std::string& symbol) const { if (!symtab_list_.empty()) return symtab_list_[0].valid_function(symbol); else return false; } inline variable_ptr get_variable(const std::string& variable_name) const { if (!valid_symbol(variable_name)) return reinterpret_cast<variable_ptr>(0); variable_ptr result = reinterpret_cast<variable_ptr>(0); for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else result = local_data(i) .variable_store.get(variable_name); if (result) break; } return result; } inline variable_ptr get_variable(const T& var_ref) const { variable_ptr result = reinterpret_cast<variable_ptr>(0); for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else 
result = local_data(i).variable_store .get_from_varptr(reinterpret_cast<const void*>(&var_ref)); if (result) break; } return result; } #ifndef exprtk_disable_string_capabilities inline stringvar_ptr get_stringvar(const std::string& string_name) const { if (!valid_symbol(string_name)) return reinterpret_cast<stringvar_ptr>(0); stringvar_ptr result = reinterpret_cast<stringvar_ptr>(0); for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else result = local_data(i) .stringvar_store.get(string_name); if (result) break; } return result; } #endif inline function_ptr get_function(const std::string& function_name) const { if (!valid_function_name(function_name)) return reinterpret_cast<function_ptr>(0); function_ptr result = reinterpret_cast<function_ptr>(0); for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else result = local_data(i) .function_store.get(function_name); if (result) break; } return result; } inline vararg_function_ptr get_vararg_function(const std::string& vararg_function_name) const { if (!valid_function_name(vararg_function_name)) return reinterpret_cast<vararg_function_ptr>(0); vararg_function_ptr result = reinterpret_cast<vararg_function_ptr>(0); for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else result = local_data(i) .vararg_function_store.get(vararg_function_name); if (result) break; } return result; } inline generic_function_ptr get_generic_function(const std::string& function_name) const { if (!valid_function_name(function_name)) return reinterpret_cast<generic_function_ptr>(0); generic_function_ptr result = reinterpret_cast<generic_function_ptr>(0); for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else result = local_data(i) .generic_function_store.get(function_name); if (result) break; } return result; } inline generic_function_ptr get_string_function(const 
std::string& function_name) const { if (!valid_function_name(function_name)) return reinterpret_cast<generic_function_ptr>(0); generic_function_ptr result = reinterpret_cast<generic_function_ptr>(0); for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else result = local_data(i).string_function_store.get(function_name); if (result) break; } return result; } inline vector_holder_ptr get_vector(const std::string& vector_name) const { if (!valid_symbol(vector_name)) return reinterpret_cast<vector_holder_ptr>(0); vector_holder_ptr result = reinterpret_cast<vector_holder_ptr>(0); for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else result = local_data(i).vector_store.get(vector_name); if (result) break; } return result; } inline bool is_constant_node(const std::string& symbol_name) const { if (!valid_symbol(symbol_name)) return false; for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if (local_data(i).variable_store.is_constant(symbol_name)) return true; } return false; } #ifndef exprtk_disable_string_capabilities inline bool is_constant_string(const std::string& symbol_name) const { if (!valid_symbol(symbol_name)) return false; for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if (!local_data(i).stringvar_store.symbol_exists(symbol_name)) continue; else if ( local_data(i).stringvar_store.is_constant(symbol_name)) return true; } return false; } #endif inline bool symbol_exists(const std::string& symbol) const { for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if (symtab_list_[i].symbol_exists(symbol)) return true; } return false; } inline bool is_variable(const std::string& variable_name) const { for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if ( 
symtab_list_[i].local_data().variable_store .symbol_exists(variable_name) ) return true; } return false; } #ifndef exprtk_disable_string_capabilities inline bool is_stringvar(const std::string& stringvar_name) const { for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if ( symtab_list_[i].local_data().stringvar_store .symbol_exists(stringvar_name) ) return true; } return false; } inline bool is_conststr_stringvar(const std::string& symbol_name) const { for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if ( symtab_list_[i].local_data().stringvar_store .symbol_exists(symbol_name) ) { return ( local_data(i).stringvar_store.symbol_exists(symbol_name) || local_data(i).stringvar_store.is_constant (symbol_name) ); } } return false; } #endif inline bool is_function(const std::string& function_name) const { for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if ( local_data(i).vararg_function_store .symbol_exists(function_name) ) return true; } return false; } inline bool is_vararg_function(const std::string& vararg_function_name) const { for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if ( local_data(i).vararg_function_store .symbol_exists(vararg_function_name) ) return true; } return false; } inline bool is_vector(const std::string& vector_name) const { for (std::size_t i = 0; i < symtab_list_.size(); ++i) { if (!symtab_list_[i].valid()) continue; else if ( local_data(i).vector_store .symbol_exists(vector_name) ) return true; } return false; } inline std::string get_variable_name(const expression_node_ptr& ptr) const { return local_data().variable_store.entity_name(ptr); } inline std::string get_vector_name(const vector_holder_ptr& ptr) const { return local_data().vector_store.entity_name(ptr); } #ifndef exprtk_disable_string_capabilities inline std::string 
get_stringvar_name(const expression_node_ptr& ptr) const { return local_data().stringvar_store.entity_name(ptr); } inline std::string get_conststr_stringvar_name(const expression_node_ptr& ptr) const { return local_data().stringvar_store.entity_name(ptr); } #endif inline local_data_t& local_data(const std::size_t& index = 0) { return symtab_list_[index].local_data(); } inline const local_data_t& local_data(const std::size_t& index = 0) const { return symtab_list_[index].local_data(); } inline symbol_table_t& get_symbol_table(const std::size_t& index = 0) { return symtab_list_[index]; } }; struct parser_state { parser_state() : type_check_enabled(true) { reset(); } void reset() { parsing_return_stmt = false; parsing_break_stmt = false; return_stmt_present = false; side_effect_present = false; scope_depth = 0; } #ifndef exprtk_enable_debugging void activate_side_effect(const std::string&) #else void activate_side_effect(const std::string& source) #endif { if (!side_effect_present) { side_effect_present = true; exprtk_debug(("activate_side_effect() - caller: %s\n",source.c_str())); } } bool parsing_return_stmt; bool parsing_break_stmt; bool return_stmt_present; bool side_effect_present; bool type_check_enabled; std::size_t scope_depth; }; public: struct unknown_symbol_resolver { enum usr_symbol_type { e_usr_variable_type = 0, e_usr_constant_type = 1 }; enum usr_mode { e_usrmode_default = 0, e_usrmode_extended = 1 }; usr_mode mode; unknown_symbol_resolver(const usr_mode m = e_usrmode_default) : mode(m) {} virtual ~unknown_symbol_resolver() {} virtual bool process(const std::string& /*unknown_symbol*/, usr_symbol_type& st, T& default_value, std::string& error_message) { if (e_usrmode_default != mode) return false; st = e_usr_variable_type; default_value = T(0); error_message.clear(); return true; } virtual bool process(const std::string& /* unknown_symbol */, symbol_table_t& /* symbol_table */, std::string& /* error_message */) { return false; } }; enum collect_type { 
      // Bit-flags selecting which entity kinds the dependent entity
      // collector records during compilation.
      e_ct_none        = 0,
      e_ct_variables   = 1,
      e_ct_functions   = 2,
      e_ct_assignments = 4
   };

   // Classification of a collected symbol.
   enum symbol_type
   {
      e_st_unknown        = 0,
      e_st_variable       = 1,
      e_st_vector         = 2,
      e_st_vecelem        = 3,
      e_st_string         = 4,
      e_st_function       = 5,
      e_st_local_variable = 6,
      e_st_local_vector   = 7,
      e_st_local_string   = 8
   };

   // Records the variables, functions and assignment targets encountered
   // while compiling an expression, for later inspection by the caller.
   class dependent_entity_collector
   {
   public:

      typedef std::pair<std::string,symbol_type> symbol_t;
      typedef std::vector<symbol_t> symbol_list_t;

      dependent_entity_collector(const std::size_t options = e_ct_none)
      : options_(options),
        collect_variables_  ((options_ & e_ct_variables  ) == e_ct_variables  ),
        collect_functions_  ((options_ & e_ct_functions  ) == e_ct_functions  ),
        collect_assignments_((options_ & e_ct_assignments) == e_ct_assignments),
        return_present_   (false),
        final_stmt_return_(false)
      {}

      // Copies the sorted, de-duplicated (case-normalised) list of
      // collected symbols into symbols_list; returns the resulting size.
      template <typename Allocator,
                template <typename,typename> class Sequence>
      inline std::size_t symbols(Sequence<symbol_t,Allocator>& symbols_list)
      {
         if (!collect_variables_ && !collect_functions_)
            return 0;
         else if (symbol_name_list_.empty())
            return 0;

         for (std::size_t i = 0; i < symbol_name_list_.size(); ++i)
         {
            details::case_normalise(symbol_name_list_[i].first);
         }

         std::sort(symbol_name_list_.begin(),symbol_name_list_.end());

         std::unique_copy(symbol_name_list_.begin(),
                          symbol_name_list_.end  (),
                          std::back_inserter(symbols_list));

         return symbols_list.size();
      }

      // As symbols(), but for the targets of assignment operations.
      template <typename Allocator,
                template <typename,typename> class Sequence>
      inline std::size_t assignment_symbols(Sequence<symbol_t,Allocator>& assignment_list)
      {
         if (!collect_assignments_)
            return 0;
         else if (assignment_name_list_.empty())
            return 0;

         for (std::size_t i = 0; i < assignment_name_list_.size(); ++i)
         {
            details::case_normalise(assignment_name_list_[i].first);
         }

         std::sort(assignment_name_list_.begin(),assignment_name_list_.end());

         std::unique_copy(assignment_name_list_.begin(),
                          assignment_name_list_.end  (),
                          std::back_inserter(assignment_list));

         return assignment_list.size();
      }

      void clear()
      {
         symbol_name_list_    .clear();
         assignment_name_list_.clear();
         retparam_list_       .clear();
         return_present_    = false;
         final_stmt_return_ = false;
      }

      bool& collect_variables  () { return collect_variables_;   }
      bool& collect_functions  () { return collect_functions_;   }
      bool& collect_assignments() { return collect_assignments_; }

      bool return_present   () const { return return_present_;    }
      bool final_stmt_return() const { return final_stmt_return_; }

      typedef std::vector<std::string> retparam_list_t;

      retparam_list_t return_param_type_list() const
      {
         return retparam_list_;
      }

   private:

      // Records a symbol reference, honouring the collection flags.
      inline void add_symbol(const std::string& symbol, const symbol_type st)
      {
         switch (st)
         {
            case e_st_variable       :
            case e_st_vector         :
            case e_st_string         :
            case e_st_local_variable :
            case e_st_local_vector   :
            case e_st_local_string   : if (collect_variables_)
                                          symbol_name_list_
                                             .push_back(std::make_pair(symbol, st));
                                       break;

            case e_st_function       : if (collect_functions_)
                                          symbol_name_list_
                                             .push_back(std::make_pair(symbol, st));
                                       break;

            default                  : return;
         }
      }

      // Records an assignment target, honouring the collection flag.
      inline void add_assignment(const std::string& symbol, const symbol_type st)
      {
         switch (st)
         {
            case e_st_variable :
            case e_st_vector   :
            case e_st_string   : if (collect_assignments_)
                                    assignment_name_list_
                                       .push_back(std::make_pair(symbol, st));
                                 break;

            default            : return;
         }
      }

      std::size_t options_;
      bool collect_variables_;
      bool collect_functions_;
      bool collect_assignments_;
      bool return_present_;
      bool final_stmt_return_;
      symbol_list_t symbol_name_list_;
      symbol_list_t assignment_name_list_;
      retparam_list_t retparam_list_;

      friend class parser<T>;
   };

   // Run-time configurable parser settings: which base functions, control
   // structures and operators are enabled, plus compile-time check options.
   class settings_store
   {
   private:

      // Case-insensitive set of disabled entity names.
      typedef std::set<std::string,details::ilesscompare> disabled_entity_set_t;
      typedef disabled_entity_set_t::iterator des_itr_t;

   public:

      enum settings_compilation_options
      {
         e_unknown              =    0,
         e_replacer             =    1,
         e_joiner               =    2,
         e_numeric_check        =    4,
         e_bracket_check        =    8,
         e_sequence_check       =   16,
         e_commutative_check    =   32,
         e_strength_reduction   =   64,
         e_disable_vardef       =  128,
         e_collect_vars         =  256,
         e_collect_funcs        =  512,
         e_collect_assings      = 1024,
         e_disable_usr_on_rsrvd = 2048,
         e_disable_zero_return  = 4096
      };

      // Identifiers for the built-in base functions that may be
      // individually enabled or disabled (offset by one relative to
      // details::base_function_list).
      enum settings_base_funcs
      {
         e_bf_unknown = 0,
         e_bf_abs       , e_bf_acos    , e_bf_acosh   , e_bf_asin   ,
         e_bf_asinh     , e_bf_atan    , e_bf_atan2   , e_bf_atanh  ,
         e_bf_avg       , e_bf_ceil    , e_bf_clamp   , e_bf_cos    ,
         e_bf_cosh      , e_bf_cot     , e_bf_csc     , e_bf_equal  ,
         e_bf_erf       , e_bf_erfc    , e_bf_exp     , e_bf_expm1  ,
         e_bf_floor     , e_bf_frac    , e_bf_hypot   , e_bf_iclamp ,
         e_bf_like      , e_bf_log     , e_bf_log10   , e_bf_log1p  ,
         e_bf_log2      , e_bf_logn    , e_bf_mand    , e_bf_max    ,
         e_bf_min       , e_bf_mod     , e_bf_mor     , e_bf_mul    ,
         e_bf_ncdf      , e_bf_pow     , e_bf_root    , e_bf_round  ,
         e_bf_roundn    , e_bf_sec     , e_bf_sgn     , e_bf_sin    ,
         e_bf_sinc      , e_bf_sinh    , e_bf_sqrt    , e_bf_sum    ,
         e_bf_swap      , e_bf_tan     , e_bf_tanh    , e_bf_trunc  ,
         e_bf_not_equal , e_bf_inrange , e_bf_deg2grad, e_bf_deg2rad,
         e_bf_rad2deg   , e_bf_grad2deg
      };

      enum settings_control_structs
      {
         e_ctrl_unknown = 0,
         e_ctrl_ifelse,
         e_ctrl_switch,
         e_ctrl_for_loop,
         e_ctrl_while_loop,
         e_ctrl_repeat_loop,
         e_ctrl_return
      };

      enum settings_logic_opr
      {
         e_logic_unknown = 0,
         e_logic_and, e_logic_nand , e_logic_nor ,
         e_logic_not, e_logic_or   , e_logic_xnor,
         e_logic_xor, e_logic_scand, e_logic_scor
      };

      enum settings_arithmetic_opr
      {
         e_arith_unknown = 0,
         e_arith_add, e_arith_sub, e_arith_mul,
         e_arith_div, e_arith_mod, e_arith_pow
      };

      enum settings_assignment_opr
      {
         e_assign_unknown = 0,
         e_assign_assign, e_assign_addass, e_assign_subass,
         e_assign_mulass, e_assign_divass, e_assign_modass
      };

      enum settings_inequality_opr
      {
         e_ineq_unknown = 0,
         e_ineq_lt   , e_ineq_lte, e_ineq_eq    ,
         e_ineq_equal, e_ineq_ne , e_ineq_nequal,
         e_ineq_gte  , e_ineq_gt
      };

      // Default compile options: all lexer-level checks and optimisations.
      static const std::size_t compile_all_opts = e_replacer          +
                                                  e_joiner            +
                                                  e_numeric_check     +
                                                  e_bracket_check     +
                                                  e_sequence_check    +
                                                  e_commutative_check +
                                                  e_strength_reduction;

      settings_store(const std::size_t compile_options = compile_all_opts)
      {
         load_compile_options(compile_options);
      }

      // The enable_all_* methods re-enable a whole category by clearing
      // the corresponding disabled set.
      settings_store& enable_all_base_functions()
      {
         disabled_func_set_.clear();
         return (*this);
      }

      settings_store& enable_all_control_structures()
      {
         disabled_ctrl_set_.clear();
         return (*this);
      }

      settings_store& enable_all_logic_ops()
      {
         disabled_logic_set_.clear();
         return (*this);
      }

      settings_store& enable_all_arithmetic_ops()
      {
         disabled_arithmetic_set_.clear();
         return (*this);
      }

      settings_store& enable_all_assignment_ops()
      {
         disabled_assignment_set_.clear();
         return (*this);
      }

      settings_store& enable_all_inequality_ops()
      {
         disabled_inequality_set_.clear();
         return (*this);
      }

      settings_store& enable_local_vardef()
      {
         disable_vardef_ = false;
         return (*this);
      }

      // The disable_all_* methods disable a whole category by inserting
      // the category's full name list into the disabled set.
      settings_store& disable_all_base_functions()
      {
         std::copy(details::base_function_list,
                   details::base_function_list + details::base_function_list_size,
                   std::insert_iterator<disabled_entity_set_t>
                     (disabled_func_set_, disabled_func_set_.begin()));
         return (*this);
      }

      settings_store& disable_all_control_structures()
      {
         std::copy(details::cntrl_struct_list,
                   details::cntrl_struct_list + details::cntrl_struct_list_size,
                   std::insert_iterator<disabled_entity_set_t>
                     (disabled_ctrl_set_, disabled_ctrl_set_.begin()));
         return (*this);
      }

      settings_store& disable_all_logic_ops()
      {
         std::copy(details::logic_ops_list,
                   details::logic_ops_list + details::logic_ops_list_size,
                   std::insert_iterator<disabled_entity_set_t>
                     (disabled_logic_set_, disabled_logic_set_.begin()));
         return (*this);
      }

      settings_store& disable_all_arithmetic_ops()
      {
         std::copy(details::arithmetic_ops_list,
                   details::arithmetic_ops_list + details::arithmetic_ops_list_size,
                   std::insert_iterator<disabled_entity_set_t>
                     (disabled_arithmetic_set_, disabled_arithmetic_set_.begin()));
         return (*this);
      }

      settings_store& disable_all_assignment_ops()
      {
         std::copy(details::assignment_ops_list,
                   details::assignment_ops_list + details::assignment_ops_list_size,
                   std::insert_iterator<disabled_entity_set_t>
                     (disabled_assignment_set_, disabled_assignment_set_.begin()));
         return (*this);
      }

      settings_store& disable_all_inequality_ops()
      {
         std::copy(details::inequality_ops_list,
                   details::inequality_ops_list +
details::inequality_ops_list_size, std::insert_iterator<disabled_entity_set_t> (disabled_inequality_set_, disabled_inequality_set_.begin())); return (*this); } settings_store& disable_local_vardef() { disable_vardef_ = true; return (*this); } bool replacer_enabled () const { return enable_replacer_; } bool commutative_check_enabled () const { return enable_commutative_check_; } bool joiner_enabled () const { return enable_joiner_; } bool numeric_check_enabled () const { return enable_numeric_check_; } bool bracket_check_enabled () const { return enable_bracket_check_; } bool sequence_check_enabled () const { return enable_sequence_check_; } bool strength_reduction_enabled () const { return enable_strength_reduction_; } bool collect_variables_enabled () const { return enable_collect_vars_; } bool collect_functions_enabled () const { return enable_collect_funcs_; } bool collect_assignments_enabled() const { return enable_collect_assings_; } bool vardef_disabled () const { return disable_vardef_; } bool rsrvd_sym_usr_disabled () const { return disable_rsrvd_sym_usr_; } bool zero_return_disabled () const { return disable_zero_return_; } bool function_enabled(const std::string& function_name) { if (disabled_func_set_.empty()) return true; else return (disabled_func_set_.end() == disabled_func_set_.find(function_name)); } bool control_struct_enabled(const std::string& control_struct) { if (disabled_ctrl_set_.empty()) return true; else return (disabled_ctrl_set_.end() == disabled_ctrl_set_.find(control_struct)); } bool logic_enabled(const std::string& logic_operation) { if (disabled_logic_set_.empty()) return true; else return (disabled_logic_set_.end() == disabled_logic_set_.find(logic_operation)); } bool arithmetic_enabled(const details::operator_type& arithmetic_operation) { if (disabled_logic_set_.empty()) return true; else return disabled_arithmetic_set_.end() == disabled_arithmetic_set_ .find(arith_opr_to_string(arithmetic_operation)); } bool 
      assignment_enabled(const details::operator_type& assignment)
      {
         // True if the given assignment operation has not been disabled.
         if (disabled_assignment_set_.empty())
            return true;
         else
            return disabled_assignment_set_.end() == disabled_assignment_set_
                      .find(assign_opr_to_string(assignment));
      }

      // True if the given inequality operation has not been disabled.
      bool inequality_enabled(const details::operator_type& inequality)
      {
         if (disabled_inequality_set_.empty())
            return true;
         else
            return disabled_inequality_set_.end() == disabled_inequality_set_
                      .find(inequality_opr_to_string(inequality));
      }

      // Negative-form queries: true when the entity IS disabled.
      bool function_disabled(const std::string& function_name)
      {
         if (disabled_func_set_.empty())
            return false;
         else
            return (disabled_func_set_.end() != disabled_func_set_.find(function_name));
      }

      bool control_struct_disabled(const std::string& control_struct)
      {
         if (disabled_ctrl_set_.empty())
            return false;
         else
            return (disabled_ctrl_set_.end() != disabled_ctrl_set_.find(control_struct));
      }

      bool logic_disabled(const std::string& logic_operation)
      {
         if (disabled_logic_set_.empty())
            return false;
         else
            return (disabled_logic_set_.end() != disabled_logic_set_.find(logic_operation));
      }

      bool assignment_disabled(const details::operator_type assignment_operation)
      {
         if (disabled_assignment_set_.empty())
            return false;
         else
            return disabled_assignment_set_.end() != disabled_assignment_set_
                      .find(assign_opr_to_string(assignment_operation));
      }

      bool arithmetic_disabled(const details::operator_type arithmetic_operation)
      {
         if (disabled_arithmetic_set_.empty())
            return false;
         else
            return disabled_arithmetic_set_.end() != disabled_arithmetic_set_
                      .find(arith_opr_to_string(arithmetic_operation));
      }

      bool inequality_disabled(const details::operator_type& inequality)
      {
         if (disabled_inequality_set_.empty())
            return false;
         else
            return disabled_inequality_set_.end() != disabled_inequality_set_
                      .find(inequality_opr_to_string(inequality));
      }

      // Disable a single base function; enum values are offset by one
      // relative to details::base_function_list.
      settings_store& disable_base_function(settings_base_funcs bf)
      {
         if (
              (e_bf_unknown != bf) &&
              (static_cast<std::size_t>(bf) < (details::base_function_list_size + 1))
            )
         {
            disabled_func_set_.insert(details::base_function_list[bf - 1]);
         }

         return (*this);
      }

      settings_store& disable_control_structure(settings_control_structs ctrl_struct)
      {
         if (
              (e_ctrl_unknown != ctrl_struct) &&
              (static_cast<std::size_t>(ctrl_struct) < (details::cntrl_struct_list_size + 1))
            )
         {
            disabled_ctrl_set_.insert(details::cntrl_struct_list[ctrl_struct - 1]);
         }

         return (*this);
      }

      settings_store& disable_logic_operation(settings_logic_opr logic)
      {
         if (
              (e_logic_unknown != logic) &&
              (static_cast<std::size_t>(logic) < (details::logic_ops_list_size + 1))
            )
         {
            disabled_logic_set_.insert(details::logic_ops_list[logic - 1]);
         }

         return (*this);
      }

      settings_store& disable_arithmetic_operation(settings_arithmetic_opr arithmetic)
      {
         if (
              (e_arith_unknown != arithmetic) &&
              (static_cast<std::size_t>(arithmetic) < (details::arithmetic_ops_list_size + 1))
            )
         {
            disabled_arithmetic_set_.insert(details::arithmetic_ops_list[arithmetic - 1]);
         }

         return (*this);
      }

      settings_store& disable_assignment_operation(settings_assignment_opr assignment)
      {
         if (
              (e_assign_unknown != assignment) &&
              (static_cast<std::size_t>(assignment) < (details::assignment_ops_list_size + 1))
            )
         {
            disabled_assignment_set_.insert(details::assignment_ops_list[assignment - 1]);
         }

         return (*this);
      }

      settings_store& disable_inequality_operation(settings_inequality_opr inequality)
      {
         if (
              (e_ineq_unknown != inequality) &&
              (static_cast<std::size_t>(inequality) < (details::inequality_ops_list_size + 1))
            )
         {
            disabled_inequality_set_.insert(details::inequality_ops_list[inequality - 1]);
         }

         return (*this);
      }

      // Re-enable a single previously disabled base function.
      settings_store& enable_base_function(settings_base_funcs bf)
      {
         if (
              (e_bf_unknown != bf) &&
              (static_cast<std::size_t>(bf) < (details::base_function_list_size + 1))
            )
         {
            const des_itr_t itr = disabled_func_set_.find(details::base_function_list[bf - 1]);

            if (disabled_func_set_.end() != itr)
            {
               disabled_func_set_.erase(itr);
            }
         }

         return (*this);
      }

      settings_store& enable_control_structure(settings_control_structs ctrl_struct)
      {
         if (
              (e_ctrl_unknown != ctrl_struct) &&
              (static_cast<std::size_t>(ctrl_struct) < (details::cntrl_struct_list_size + 1))
            )
         {
            const des_itr_t itr = disabled_ctrl_set_.find(details::cntrl_struct_list[ctrl_struct - 1]);

            if (disabled_ctrl_set_.end() != itr)
            {
               disabled_ctrl_set_.erase(itr);
            }
         }

         return (*this);
      }

      settings_store& enable_logic_operation(settings_logic_opr logic)
      {
         if (
              (e_logic_unknown != logic) &&
              (static_cast<std::size_t>(logic) < (details::logic_ops_list_size + 1))
            )
         {
            const des_itr_t itr = disabled_logic_set_.find(details::logic_ops_list[logic - 1]);

            if (disabled_logic_set_.end() != itr)
            {
               disabled_logic_set_.erase(itr);
            }
         }

         return (*this);
      }

      settings_store& enable_arithmetic_operation(settings_arithmetic_opr arithmetic)
      {
         if (
              (e_arith_unknown != arithmetic) &&
              (static_cast<std::size_t>(arithmetic) < (details::arithmetic_ops_list_size + 1))
            )
         {
            const des_itr_t itr = disabled_arithmetic_set_.find(details::arithmetic_ops_list[arithmetic - 1]);

            if (disabled_arithmetic_set_.end() != itr)
            {
               disabled_arithmetic_set_.erase(itr);
            }
         }

         return (*this);
      }

      settings_store& enable_assignment_operation(settings_assignment_opr assignment)
      {
         if (
              (e_assign_unknown != assignment) &&
              (static_cast<std::size_t>(assignment) < (details::assignment_ops_list_size + 1))
            )
         {
            const des_itr_t itr = disabled_assignment_set_.find(details::assignment_ops_list[assignment - 1]);

            if (disabled_assignment_set_.end() != itr)
            {
               disabled_assignment_set_.erase(itr);
            }
         }

         return (*this);
      }

      settings_store& enable_inequality_operation(settings_inequality_opr inequality)
      {
         if (
              (e_ineq_unknown != inequality) &&
              (static_cast<std::size_t>(inequality) < (details::inequality_ops_list_size + 1))
            )
         {
            const des_itr_t itr = disabled_inequality_set_.find(details::inequality_ops_list[inequality - 1]);

            if (disabled_inequality_set_.end() != itr)
            {
               disabled_inequality_set_.erase(itr);
            }
         }

         return (*this);
      }

   private:

      // Decode the compile-options bit mask into individual flags.
      void load_compile_options(const std::size_t compile_options)
      {
         enable_replacer_           = (compile_options &
                                       e_replacer            ) == e_replacer;
         enable_joiner_             = (compile_options & e_joiner              ) == e_joiner;
         enable_numeric_check_      = (compile_options & e_numeric_check       ) == e_numeric_check;
         enable_bracket_check_      = (compile_options & e_bracket_check       ) == e_bracket_check;
         enable_sequence_check_     = (compile_options & e_sequence_check      ) == e_sequence_check;
         enable_commutative_check_  = (compile_options & e_commutative_check   ) == e_commutative_check;
         enable_strength_reduction_ = (compile_options & e_strength_reduction  ) == e_strength_reduction;
         enable_collect_vars_       = (compile_options & e_collect_vars        ) == e_collect_vars;
         enable_collect_funcs_      = (compile_options & e_collect_funcs       ) == e_collect_funcs;
         enable_collect_assings_    = (compile_options & e_collect_assings     ) == e_collect_assings;
         disable_vardef_            = (compile_options & e_disable_vardef      ) == e_disable_vardef;
         disable_rsrvd_sym_usr_     = (compile_options & e_disable_usr_on_rsrvd) == e_disable_usr_on_rsrvd;
         disable_zero_return_       = (compile_options & e_disable_zero_return ) == e_disable_zero_return;
      }

      // Maps an assignment operator enum to its source-text form.
      std::string assign_opr_to_string(details::operator_type opr)
      {
         switch (opr)
         {
            case details::e_assign : return ":=";
            case details::e_addass : return "+=";
            case details::e_subass : return "-=";
            case details::e_mulass : return "*=";
            case details::e_divass : return "/=";
            case details::e_modass : return "%=";
            default                : return "";
         }
      }

      // Maps an arithmetic operator enum to its source-text form.
      std::string arith_opr_to_string(details::operator_type opr)
      {
         switch (opr)
         {
            case details::e_add : return "+";
            case details::e_sub : return "-";
            case details::e_mul : return "*";
            case details::e_div : return "/";
            case details::e_mod : return "%";
            default             : return "";
         }
      }

      // Maps an inequality operator enum to its source-text form.
      std::string inequality_opr_to_string(details::operator_type opr)
      {
         switch (opr)
         {
            case details::e_lt    : return "<";
            case details::e_lte   : return "<=";
            case details::e_eq    : return "==";
            case details::e_equal : return "=";
            case details::e_ne    : return "!=";
            case details::e_nequal: return "<>";
            case details::e_gte   : return ">=";
            case details::e_gt    : return ">";
            default               : return "";
         }
      }

      bool enable_replacer_;
      bool enable_joiner_;
      bool enable_numeric_check_;
      bool enable_bracket_check_;
      bool enable_sequence_check_;
      bool enable_commutative_check_;
      bool enable_strength_reduction_;
      bool enable_collect_vars_;
      bool enable_collect_funcs_;
      bool enable_collect_assings_;
      bool disable_vardef_;
      bool disable_rsrvd_sym_usr_;
      bool disable_zero_return_;

      disabled_entity_set_t disabled_func_set_ ;
      disabled_entity_set_t disabled_ctrl_set_ ;
      disabled_entity_set_t disabled_logic_set_;
      disabled_entity_set_t disabled_arithmetic_set_;
      disabled_entity_set_t disabled_assignment_set_;
      disabled_entity_set_t disabled_inequality_set_;

      friend class parser<T>;
   };

   typedef settings_store settings_t;

   // Construct a parser with the given settings, wiring up the lexer helper
   // chain, the operation maps and the expression generator.
   parser(const settings_t& settings = settings_t())
   : settings_(settings),
     resolve_unknown_symbol_(false),
     results_context_(0),
     unknown_symbol_resolver_(reinterpret_cast<unknown_symbol_resolver*>(0)),
     #ifdef _MSC_VER
     #pragma warning(push)
     #pragma warning (disable:4355)
     #endif
     sem_(*this),
     #ifdef _MSC_VER
     #pragma warning(pop)
     #endif
     operator_joiner_2_(2),
     operator_joiner_3_(3)
   {
      init_precompilation();

      load_operations_map           (base_ops_map_     );
      load_unary_operations_map     (unary_op_map_     );
      load_binary_operations_map    (binary_op_map_    );
      load_inv_binary_operations_map(inv_binary_op_map_);
      load_sf3_map                  (sf3_map_          );
      load_sf4_map                  (sf4_map_          );

      expression_generator_.init_synthesize_map();
      expression_generator_.set_parser(*this);
      expression_generator_.set_uom(unary_op_map_);
      expression_generator_.set_bom(binary_op_map_);
      expression_generator_.set_ibom(inv_binary_op_map_);
      expression_generator_.set_sf3m(sf3_map_);
      expression_generator_.set_sf4m(sf4_map_);
      expression_generator_.set_strength_reduction_state(settings_.strength_reduction_enabled());
   }

   ~parser()
   {}

   // Configure lexer helpers (symbol replacer, commutative inserter,
   // operator joiners and checkers) according to the current settings.
   inline void init_precompilation()
   {
      if (settings_.collect_variables_enabled())
         dec_.collect_variables() = true;

      if (settings_.collect_functions_enabled())
         dec_.collect_functions() = true;

      if
          (settings_.collect_assignments_enabled())
         dec_.collect_assignments() = true;

      if (settings_.replacer_enabled())
      {
         symbol_replacer_.clear();
         symbol_replacer_.add_replace("true" ,"1",lexer::token::e_number);
         symbol_replacer_.add_replace("false","0",lexer::token::e_number);
         helper_assembly_.token_modifier_list.clear();
         helper_assembly_.register_modifier(&symbol_replacer_);
      }

      if (settings_.commutative_check_enabled())
      {
         // Reserved words must not take part in implied multiplication.
         for (std::size_t i = 0; i < details::reserved_words_size; ++i)
         {
            commutative_inserter_.ignore_symbol(details::reserved_words[i]);
         }

         helper_assembly_.token_inserter_list.clear();
         helper_assembly_.register_inserter(&commutative_inserter_);
      }

      if (settings_.joiner_enabled())
      {
         helper_assembly_.token_joiner_list.clear();
         helper_assembly_.register_joiner(&operator_joiner_2_);
         helper_assembly_.register_joiner(&operator_joiner_3_);
      }

      if (
           settings_.numeric_check_enabled () ||
           settings_.bracket_check_enabled () ||
           settings_.sequence_check_enabled()
         )
      {
         helper_assembly_.token_scanner_list.clear();

         if (settings_.numeric_check_enabled())
         {
            helper_assembly_.register_scanner(&numeric_checker_);
         }

         if (settings_.bracket_check_enabled())
         {
            helper_assembly_.register_scanner(&bracket_checker_);
         }

         if (settings_.sequence_check_enabled())
         {
            helper_assembly_.register_scanner(&sequence_validator_);
         }
      }
   }

   // Compile an expression string into expr. Returns true on success;
   // on failure diagnostics are available via error()/get_error().
   inline bool compile(const std::string& expression_string, expression<T>& expr)
   {
      // Reset all per-compilation state before parsing.
      state_          .reset();
      error_list_     .clear();
      brkcnt_list_    .clear();
      synthesis_error_.clear();
      sem_            .cleanup();

      return_cleanup();

      expression_generator_.set_allocator(node_allocator_);

      if (expression_string.empty())
      {
         set_error(
            make_error(parser_error::e_syntax,
                       "ERR000 - Empty expression!",
                       exprtk_error_location));

         return false;
      }

      if (!init(expression_string))
      {
         process_lexer_errors();
         return false;
      }

      if (lexer().empty())
      {
         set_error(
            make_error(parser_error::e_syntax,
                       "ERR001 - Empty expression!",
                       exprtk_error_location));

         return false;
      }

      if (!run_assemblies())
      {
         return false;
      }

      symtab_store_.symtab_list_ = expr.get_symbol_table_list();
      dec_.clear();

      lexer().begin();

      next_token();

      expression_node_ptr e = parse_corpus();

      if ((0 != e) && (token_t::e_eof == current_token().type))
      {
         // Parse succeeded and the full token stream was consumed.
         bool* retinvk_ptr = 0;

         if (state_.return_stmt_present)
         {
            dec_.return_present_ = true;

            e = expression_generator_
                  .return_envelope(e,results_context_,retinvk_ptr);
         }

         expr.set_expression(e);
         expr.set_retinvk(retinvk_ptr);

         register_local_vars(expr);
         register_return_results(expr);

         return !(!expr);
      }
      else
      {
         if (error_list_.empty())
         {
            set_error(
               make_error(parser_error::e_syntax,
                          current_token(),
                          "ERR002 - Invalid expression encountered",
                          exprtk_error_location));
         }

         // Failure path: release any partially built state and nodes.
         dec_.clear    ();
         sem_.cleanup  ();
         return_cleanup();

         if ((0 != e) && branch_deletable(e))
         {
            destroy_node(e);
         }

         return false;
      }
   }

   // Convenience overload: compile against a single symbol table and
   // return the resulting expression (check validity via the expression).
   inline expression_t compile(const std::string& expression_string, symbol_table_t& symtab)
   {
      expression_t expr;
      expr.register_symbol_table(symtab);
      compile(expression_string,expr);
      return expr;
   }

   // Convert lexer-level error tokens into parser diagnostics.
   void process_lexer_errors()
   {
      for (std::size_t i = 0; i < lexer().size(); ++i)
      {
         if (lexer()[i].is_error())
         {
            std::string diagnostic = "ERR003 - ";

            switch (lexer()[i].type)
            {
               case lexer::token::e_error      : diagnostic += "General token error";
                                                 break;

               case lexer::token::e_err_symbol : diagnostic += "Symbol error";
                                                 break;

               case lexer::token::e_err_number : diagnostic += "Invalid numeric token";
                                                 break;

               case lexer::token::e_err_string : diagnostic += "Invalid string token";
                                                 break;

               case lexer::token::e_err_sfunc  : diagnostic += "Invalid special function token";
                                                 break;

               default                         : diagnostic += "Unknown compiler error";
            }

            set_error(
               make_error(parser_error::e_lexer,
                          lexer()[i],
                          diagnostic + ": " + lexer()[i].value,
                          exprtk_error_location));
         }
      }
   }

   // Run the configured lexer helper passes over the token stream.
   inline bool run_assemblies()
   {
      if (settings_.commutative_check_enabled())
      {
         helper_assembly_.run_inserters(lexer());
      }

      if (settings_.joiner_enabled())
      {
         helper_assembly_.run_joiners(lexer());
      }

      if (settings_.replacer_enabled())
      {
         helper_assembly_.run_modifiers(lexer());
      }

      if (
           settings_.numeric_check_enabled () ||
           settings_.bracket_check_enabled () ||
           settings_.sequence_check_enabled()
         )
      {
         if (!helper_assembly_.run_scanners(lexer()))
         {
            // A scanner failed: identify which one and report its errors.
            if (helper_assembly_.error_token_scanner)
            {
               lexer::helper::bracket_checker*    bracket_checker_ptr    = 0;
               lexer::helper::numeric_checker*    numeric_checker_ptr    = 0;
               lexer::helper::sequence_validator* sequence_validator_ptr = 0;

               if (0 != (bracket_checker_ptr = dynamic_cast<lexer::helper::bracket_checker*>(helper_assembly_.error_token_scanner)))
               {
                  set_error(
                     make_error(parser_error::e_token,
                                bracket_checker_ptr->error_token(),
                                "ERR004 - Mismatched brackets: '" + bracket_checker_ptr->error_token().value + "'",
                                exprtk_error_location));
               }
               else if (0 != (numeric_checker_ptr = dynamic_cast<lexer::helper::numeric_checker*>(helper_assembly_.error_token_scanner)))
               {
                  for (std::size_t i = 0; i < numeric_checker_ptr->error_count(); ++i)
                  {
                     lexer::token error_token = lexer()[numeric_checker_ptr->error_index(i)];

                     set_error(
                        make_error(parser_error::e_token,
                                   error_token,
                                   "ERR005 - Invalid numeric token: '" + error_token.value + "'",
                                   exprtk_error_location));
                  }

                  if (numeric_checker_ptr->error_count())
                  {
                     numeric_checker_ptr->clear_errors();
                  }
               }
               else if (0 != (sequence_validator_ptr = dynamic_cast<lexer::helper::sequence_validator*>(helper_assembly_.error_token_scanner)))
               {
                  for (std::size_t i = 0; i < sequence_validator_ptr->error_count(); ++i)
                  {
                     std::pair<lexer::token,lexer::token> error_token = sequence_validator_ptr->error(i);

                     set_error(
                        make_error(parser_error::e_token,
                                   error_token.first,
                                   "ERR006 - Invalid token sequence: '" +
                                   error_token.first.value  + "' and '" +
                                   error_token.second.value + "'",
                                   exprtk_error_location));
                  }

                  if (sequence_validator_ptr->error_count())
                  {
                     sequence_validator_ptr->clear_errors();
                  }
               }
            }

            return false;
         }
      }

      return true;
   }

   inline settings_store& settings()
   {
      return settings_;
   }

   // Returns the index'th compilation error; throws std::invalid_argument
   // for an out-of-range index.
   inline parser_error::type get_error(const std::size_t& index)
   {
      if (index < error_list_.size())
         return error_list_[index];
      else
         throw std::invalid_argument("parser::get_error() - Invalid error index specificed");
   }

   // Returns the first error diagnostic, or "No Error" if none.
   inline std::string error() const
   {
      if (!error_list_.empty())
      {
         return error_list_[0].diagnostic;
      }
      else
         return std::string("No Error");
   }

   inline std::size_t error_count() const
   {
      return error_list_.size();
   }

   inline dependent_entity_collector& dec()
   {
      return dec_;
   }

   // Register a lexer-level symbol replacement. Reserved words may not be
   // replaced; requires the replacer to be enabled in the settings.
   inline bool replace_symbol(const std::string& old_symbol, const std::string& new_symbol)
   {
      if (!settings_.replacer_enabled())
         return false;
      else if (details::is_reserved_word(old_symbol))
         return false;
      else
         return symbol_replacer_.add_replace(old_symbol,new_symbol,lexer::token::e_symbol);
   }

   inline bool remove_replace_symbol(const std::string& symbol)
   {
      if (!settings_.replacer_enabled())
         return false;
      else if (details::is_reserved_word(symbol))
         return false;
      else
         return symbol_replacer_.remove(symbol);
   }

   // Enable unknown-symbol resolution, optionally with a user resolver;
   // passing null installs the default resolver.
   inline void enable_unknown_symbol_resolver(unknown_symbol_resolver* usr = reinterpret_cast<unknown_symbol_resolver*>(0))
   {
      resolve_unknown_symbol_ = true;

      if (usr)
         unknown_symbol_resolver_ = usr;
      else
         unknown_symbol_resolver_ = &default_usr_;
   }

   inline void enable_unknown_symbol_resolver(unknown_symbol_resolver& usr)
   {
      enable_unknown_symbol_resolver(&usr);
   }

   inline void disable_unknown_symbol_resolver()
   {
      resolve_unknown_symbol_  = false;
      unknown_symbol_resolver_ = &default_usr_;
   }

private:

   // True if symbol names an enabled base operation. The length check is a
   // fast pre-filter before the map lookup.
   inline bool valid_base_operation(const std::string& symbol)
   {
      const std::size_t length = symbol.size();

      if (
           (length < 3) || // Shortest base op symbol length
           (length > 9)    // Longest base op symbol length
         )
         return false;
      else
         return settings_.function_enabled(symbol) &&
                (base_ops_map_.end() != base_ops_map_.find(symbol));
   }

   // True if symbol names an enabled variable-argument operation
   // (sum, mul, avg, min, max, mand, mor, '~' or '[*]').
   inline bool valid_vararg_operation(const std::string& symbol)
   {
      static const std::string s_sum     = "sum" ;
      static const std::string s_mul     = "mul" ;
      static const std::string s_avg     = "avg" ;
      static const std::string s_min     = "min" ;
      static const std::string s_max     =
                                           "max" ;
      static const std::string s_mand    = "mand";
      static const std::string s_mor     = "mor" ;
      static const std::string s_multi   = "~"   ;
      static const std::string s_mswitch = "[*]" ;

      return
             (
               details::imatch(symbol,s_sum    ) ||
               details::imatch(symbol,s_mul    ) ||
               details::imatch(symbol,s_avg    ) ||
               details::imatch(symbol,s_min    ) ||
               details::imatch(symbol,s_max    ) ||
               details::imatch(symbol,s_mand   ) ||
               details::imatch(symbol,s_mor    ) ||
               details::imatch(symbol,s_multi  ) ||
               details::imatch(symbol,s_mswitch)
             ) &&
             settings_.function_enabled(symbol);
   }

   // Thin wrappers over the settings negative-form queries.
   bool is_invalid_arithmetic_operation(const details::operator_type operation)
   {
      return settings_.arithmetic_disabled(operation);
   }

   bool is_invalid_assignment_operation(const details::operator_type operation)
   {
      return settings_.assignment_disabled(operation);
   }

   bool is_invalid_inequality_operation(const details::operator_type operation)
   {
      return settings_.inequality_disabled(operation);
   }

   #ifdef exprtk_enable_debugging
   // Debug builds: trace token transitions while advancing the lexer.
   inline void next_token()
   {
      std::string ct_str = current_token().value;
      parser_helper::next_token();
      std::string depth(2 * state_.scope_depth,' ');
      exprtk_debug(("%s"
                    "prev[%s] --> curr[%s]\n",
                    depth.c_str(),
                    ct_str.c_str(),
                    current_token().value.c_str()));
   }
   #endif

   // Parse a sequence of statements (the 'corpus'), collecting each
   // sub-expression and its side-effect flag, then combine them via
   // simplify(). Returns the root node, or error_node() on failure.
   inline expression_node_ptr parse_corpus()
   {
      std::vector<expression_node_ptr> arg_list;
      std::vector<bool> side_effect_list;

      expression_node_ptr result = error_node();

      // Guard: deletes collected nodes if we exit without a result.
      scoped_vec_delete<expression_node_t> sdd((*this),arg_list);

      lexer::token begin_token;
      lexer::token end_token;

      for ( ; ; )
      {
         state_.side_effect_present = false;

         begin_token = current_token();

         expression_node_ptr arg = parse_expression();

         if (0 == arg)
         {
            if (error_list_.empty())
            {
               set_error(
                  make_error(parser_error::e_syntax,
                             current_token(),
                             "ERR007 - Invalid expression encountered",
                             exprtk_error_location));
            }

            return error_node();
         }
         else
         {
            arg_list.push_back(arg);

            side_effect_list.push_back(state_.side_effect_present);

            end_token = current_token();

            std::string sub_expr = construct_subexpr(begin_token,end_token);

            exprtk_debug(("parse_corpus(%02d) Subexpr: %s\n",
                          static_cast<int>(arg_list.size() - 1),
                          sub_expr.c_str()));

            exprtk_debug(("parse_corpus(%02d) - Side effect present: %s\n",
                          static_cast<int>(arg_list.size() - 1),
                          state_.side_effect_present ? "true" : "false"));

            exprtk_debug(("-------------------------------------------------\n"));
         }

         if (lexer().finished())
            break;
         else if (token_is(token_t::e_eof,prsrhlpr_t::e_hold))
         {
            if (lexer().finished())
               break;
            else
               next_token();
         }
      }

      if (
           !arg_list.empty() &&
           is_return_node(arg_list.back())
         )
      {
         dec_.final_stmt_return_ = true;
      }

      result = simplify(arg_list,side_effect_list);

      sdd.delete_ptr = (0 == result);

      return result;
   }

   // Extract the source text between two tokens, flattening whitespace.
   std::string construct_subexpr(lexer::token& begin_token, lexer::token& end_token)
   {
      std::string result = lexer().substr(begin_token.position,end_token.position);

      for (std::size_t i = 0; i < result.size(); ++i)
      {
         if (details::is_whitespace(result[i]))
            result[i] = ' ';
      }

      return result;
   }

   static const precedence_level default_precedence = e_level00;

   // Operator-precedence state used by parse_expression().
   struct state_t
   {
      inline void set(const precedence_level& l,
                      const precedence_level& r,
                      const details::operator_type& o)
      {
         left      = l;
         right     = r;
         operation = o;
      }

      inline void reset()
      {
         left      = e_level00;
         right     = e_level00;
         operation = details::e_default;
      }

      precedence_level left;
      precedence_level right;
      details::operator_type operation;
   };

   // Precedence-climbing expression parser: parses a branch and then folds
   // in binary operators whose precedence is at least the given level.
   inline expression_node_ptr parse_expression(precedence_level precedence = e_level00)
   {
      expression_node_ptr expression = parse_branch(precedence);

      if (0 == expression)
      {
         return error_node();
      }

      bool break_loop = false;

      state_t current_state;

      for ( ; ; )
      {
         current_state.reset();

         switch (current_token().type)
         {
            case token_t::e_assign : current_state.set(e_level00,e_level00,details::e_assign); break;
            case token_t::e_addass : current_state.set(e_level00,e_level00,details::e_addass); break;
            case token_t::e_subass : current_state.set(e_level00,e_level00,details::e_subass); break;
            case token_t::e_mulass :
                                     current_state.set(e_level00,e_level00,details::e_mulass); break;
            case token_t::e_divass : current_state.set(e_level00,e_level00,details::e_divass); break;
            case token_t::e_modass : current_state.set(e_level00,e_level00,details::e_modass); break;
            case token_t::e_swap   : current_state.set(e_level00,e_level00,details::e_swap  ); break;
            case token_t::e_lt     : current_state.set(e_level05,e_level06,details::e_lt    ); break;
            case token_t::e_lte    : current_state.set(e_level05,e_level06,details::e_lte   ); break;
            case token_t::e_eq     : current_state.set(e_level05,e_level06,details::e_eq    ); break;
            case token_t::e_ne     : current_state.set(e_level05,e_level06,details::e_ne    ); break;
            case token_t::e_gte    : current_state.set(e_level05,e_level06,details::e_gte   ); break;
            case token_t::e_gt     : current_state.set(e_level05,e_level06,details::e_gt    ); break;
            case token_t::e_add    : current_state.set(e_level07,e_level08,details::e_add   ); break;
            case token_t::e_sub    : current_state.set(e_level07,e_level08,details::e_sub   ); break;
            case token_t::e_div    : current_state.set(e_level10,e_level11,details::e_div   ); break;
            case token_t::e_mul    : current_state.set(e_level10,e_level11,details::e_mul   ); break;
            case token_t::e_mod    : current_state.set(e_level10,e_level11,details::e_mod   ); break;
            case token_t::e_pow    : current_state.set(e_level12,e_level12,details::e_pow   ); break;

            default                :
               // Word operators (and/or/xor/in/like/...) arrive as symbol
               // tokens and are matched case-insensitively.
               if (token_t::e_symbol == current_token().type)
               {
                  static const std::string s_and   = "and";
                  static const std::string s_nand  = "nand";
                  static const std::string s_or    = "or";
                  static const std::string s_nor   = "nor";
                  static const std::string s_xor   = "xor";
                  static const std::string s_xnor  = "xnor";
                  static const std::string s_in    = "in";
                  static const std::string s_like  = "like";
                  static const std::string s_ilike = "ilike";
                  static const std::string s_and1  = "&";
                  static const std::string s_or1   = "|";
                  static const std::string s_not   = "not";

                  if (details::imatch(current_token().value,s_and))
                  {
                     current_state.set(e_level03, e_level04, details::e_and);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_and1))
                  {
                     // '&' maps to short-circuit AND unless disabled.
                     #ifndef exprtk_disable_sc_andor
                     current_state.set(e_level03, e_level04, details::e_scand);
                     #else
                     current_state.set(e_level03, e_level04, details::e_and);
                     #endif
                     break;
                  }
                  else if (details::imatch(current_token().value,s_nand))
                  {
                     current_state.set(e_level03, e_level04, details::e_nand);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_or))
                  {
                     current_state.set(e_level01, e_level02, details::e_or);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_or1))
                  {
                     // '|' maps to short-circuit OR unless disabled.
                     #ifndef exprtk_disable_sc_andor
                     current_state.set(e_level01, e_level02, details::e_scor);
                     #else
                     current_state.set(e_level01, e_level02, details::e_or);
                     #endif
                     break;
                  }
                  else if (details::imatch(current_token().value,s_nor))
                  {
                     current_state.set(e_level01, e_level02, details::e_nor);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_xor))
                  {
                     current_state.set(e_level01, e_level02, details::e_xor);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_xnor))
                  {
                     current_state.set(e_level01, e_level02, details::e_xnor);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_in))
                  {
                     current_state.set(e_level04, e_level04, details::e_in);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_like))
                  {
                     current_state.set(e_level04, e_level04, details::e_like);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_ilike))
                  {
                     current_state.set(e_level04, e_level04, details::e_ilike);
                     break;
                  }
                  else if (details::imatch(current_token().value,s_not))
                  {
                     break;
                  }
               }

               // No operator recognised: end of this expression.
               break_loop = true;
         }

         if (break_loop)
         {
            parse_pending_string_rangesize(expression);
            break;
         }
         else if (current_state.left < precedence)
            break;

         lexer::token prev_token = current_token();

         next_token();

         expression_node_ptr right_branch   = error_node();
         expression_node_ptr new_expression = error_node();

         if (is_invalid_arithmetic_operation(current_state.operation))
         {
            free_node(node_allocator_,expression);

            set_error(
               make_error(parser_error::e_syntax,
                          prev_token,
                          "ERR008 - Invalid arithmetic operation '" + details::to_str(current_state.operation) + "'",
                          exprtk_error_location));

            return error_node();
         }
         else if (is_invalid_inequality_operation(current_state.operation))
         {
            free_node(node_allocator_,expression);

            set_error(
               make_error(parser_error::e_syntax,
                          prev_token,
                          "ERR009 - Invalid inequality operation '" + details::to_str(current_state.operation) + "'",
                          exprtk_error_location));

            return error_node();
         }
         else if (is_invalid_assignment_operation(current_state.operation))
         {
            free_node(node_allocator_,expression);

            set_error(
               make_error(parser_error::e_syntax,
                          prev_token,
                          "ERR010 - Invalid assignment operation '" + details::to_str(current_state.operation) + "'",
                          exprtk_error_location));

            return error_node();
         }

         if (0 != (right_branch = parse_expression(current_state.right)))
         {
            if (
                 details::is_return_node(  expression) ||
                 details::is_return_node(right_branch)
               )
            {
               free_node(node_allocator_,   expression);
               free_node(node_allocator_, right_branch);

               set_error(
                  make_error(parser_error::e_syntax,
                             prev_token,
                             "ERR011 - Return statements cannot be part of sub-expressions",
                             exprtk_error_location));

               return error_node();
            }

            new_expression = expression_generator_
                               (
                                 current_state.operation,
                                 expression,
                                 right_branch
                               );
         }

         if (0 == new_expression)
         {
            if (error_list_.empty())
            {
               set_error(
                  make_error(parser_error::e_syntax,
                             prev_token,
                             !synthesis_error_.empty() ?
synthesis_error_ : "ERR012 - General parsing error at token: '" + prev_token.value + "'",
                              exprtk_error_location));
               }

               // Binary-node synthesis failed: release both operands before
               // reporting the failure.
               free_node(node_allocator_, expression);
               free_node(node_allocator_, right_branch);

               return error_node();
            }
            else
            {
               // A held '?' directly after a completed top-level expression
               // begins a ternary conditional using it as the condition.
               if (
                    token_is(token_t::e_ternary,prsrhlpr_t::e_hold) &&
                    (precedence == e_level00)
                  )
               {
                  expression = parse_ternary_conditional_statement(new_expression);
               }
               else
                  expression = new_expression;

               parse_pending_string_rangesize(expression);
            }
         }

         return expression;
      }

      // Collapses a unary-negation wrapper node, replacing 'node' in place
      // with the un-negated operand. Returns true when 'node' was rewritten;
      // false when it is not a recognised negation wrapper or lookup failed.
      bool simplify_unary_negation_branch(expression_node_ptr& node)
      {
         {
            typedef details::unary_branch_node<T,details::neg_op<T> > ubn_t;
            ubn_t* n = dynamic_cast<ubn_t*>(node);

            if (n)
            {
               // Detach the wrapped branch, then free only the wrapper node.
               expression_node_ptr un_r = n->branch(0);
               n->release();
               free_node(node_allocator_,node);
               node = un_r;

               return true;
            }
         }

         {
            typedef details::unary_variable_node<T,details::neg_op<T> > uvn_t;

            uvn_t* n = dynamic_cast<uvn_t*>(node);

            if (n)
            {
               // The wrapper negated a variable: re-acquire the variable node
               // (symbol-table store first, then the scoped-element manager).
               const T& v = n->v();
               expression_node_ptr return_node = error_node();

               if (
                    (0 != (return_node = symtab_store_.get_variable(v))) ||
                    (0 != (return_node = sem_ .get_variable(v)))
                  )
               {
                  free_node(node_allocator_,node);
                  node = return_node;

                  return true;
               }
               else
               {
                  set_error(
                     make_error(parser_error::e_syntax,
                                current_token(),
                                "ERR013 - Failed to find variable node in symbol table",
                                exprtk_error_location));

                  free_node(node_allocator_,node);

                  return false;
               }
            }
         }

         return false;
      }

      // Canonical null expression pointer used throughout to signal failure.
      static inline expression_node_ptr error_node()
      {
         return reinterpret_cast<expression_node_ptr>(0);
      }

      // RAII guard over a fixed-size array of N expression-node pointers:
      // frees every slot on scope exit unless ownership is released by
      // clearing 'delete_ptr'.
      template <typename Type, std::size_t N>
      struct scoped_delete
      {
         typedef Type* ptr_t;

         scoped_delete(parser<T>& pr, ptr_t& p)
         : delete_ptr(true),
           parser_(pr),
           p_(&p)
         {}

         scoped_delete(parser<T>& pr, ptr_t (&p)[N])
         : delete_ptr(true),
           parser_(pr),
           p_(&p[0])
         {}

         ~scoped_delete()
         {
            if (delete_ptr)
            {
               for (std::size_t i = 0; i < N; ++i)
               {
                  free_node(parser_.node_allocator_,p_[i]);
               }
            }
         }

         bool delete_ptr;
         parser<T>& parser_;
         ptr_t* p_;

      private:

         // Deliberately non-assignable (declared, never defined).
         scoped_delete<Type,N>& operator=(const scoped_delete<Type,N>&);
      };

      template <typename Type>
struct scoped_deq_delete
      {
         typedef Type* ptr_t;

         // RAII guard over a deque of expression nodes: frees and clears the
         // deque on scope exit unless ownership is released via 'delete_ptr'.
         scoped_deq_delete(parser<T>& pr, std::deque<ptr_t>& deq)
         : delete_ptr(true),
           parser_(pr),
           deq_(deq)
         {}

         ~scoped_deq_delete()
         {
            if (delete_ptr && !deq_.empty())
            {
               for (std::size_t i = 0; i < deq_.size(); ++i)
               {
                  free_node(parser_.node_allocator_,deq_[i]);
               }

               deq_.clear();
            }
         }

         bool delete_ptr;
         parser<T>& parser_;
         std::deque<ptr_t>& deq_;

      private:

         // Deliberately non-assignable (declared, never defined).
         scoped_deq_delete<Type>& operator=(const scoped_deq_delete<Type>&);
      };

      template <typename Type>
      // RAII guard over a vector of expression nodes: frees and clears the
      // vector on scope exit unless ownership is released via 'delete_ptr'.
      struct scoped_vec_delete
      {
         typedef Type* ptr_t;

         scoped_vec_delete(parser<T>& pr, std::vector<ptr_t>& vec)
         : delete_ptr(true),
           parser_(pr),
           vec_(vec)
         {}

         ~scoped_vec_delete()
         {
            if (delete_ptr && !vec_.empty())
            {
               for (std::size_t i = 0; i < vec_.size(); ++i)
               {
                  free_node(parser_.node_allocator_,vec_[i]);
               }

               vec_.clear();
            }
         }

         bool delete_ptr;
         parser<T>& parser_;
         std::vector<ptr_t>& vec_;

      private:

         // Deliberately non-assignable (declared, never defined).
         scoped_vec_delete<Type>& operator=(const scoped_vec_delete<Type>&);
      };

      // Flips the referenced flag on construction and flips it back on
      // destruction (scope-bounded boolean inversion).
      struct scoped_bool_negator
      {
         scoped_bool_negator(bool& bb)
         : b(bb)
         { b = !b; }

         ~scoped_bool_negator()
         { b = !b; }

         bool& b;
      };

      // On destruction ORs the flag with its value at construction, so a
      // 'true' observed before the scope is never lost by the scope body.
      struct scoped_bool_or_restorer
      {
         scoped_bool_or_restorer(bool& bb)
         : b(bb),
           original_value_(bb)
         {}

         ~scoped_bool_or_restorer()
         {
            b = b || original_value_;
         }

         bool& b;
         bool original_value_;
      };

      // Dispatches a call to a user-registered ifunction by its declared
      // parameter count (0..20 supported); emits ERR014/ERR015 diagnostics
      // on an unsupported arity or failed node synthesis.
      inline expression_node_ptr parse_function_invocation(ifunction<T>* function, const std::string& function_name)
      {
         expression_node_ptr func_node = reinterpret_cast<expression_node_ptr>(0);

         switch (function->param_count)
         {
            case  0 : func_node = parse_function_call_0 (function,function_name); break;
            case  1 : func_node = parse_function_call< 1>(function,function_name); break;
            case  2 : func_node = parse_function_call< 2>(function,function_name); break;
            case  3 : func_node = parse_function_call< 3>(function,function_name); break;
            case  4 : func_node = parse_function_call< 4>(function,function_name); break;
            case  5 : func_node = parse_function_call< 5>(function,function_name); break;
            case  6 : func_node = parse_function_call<
6>(function,function_name); break; case 7 : func_node = parse_function_call< 7>(function,function_name); break; case 8 : func_node = parse_function_call< 8>(function,function_name); break; case 9 : func_node = parse_function_call< 9>(function,function_name); break; case 10 : func_node = parse_function_call<10>(function,function_name); break; case 11 : func_node = parse_function_call<11>(function,function_name); break; case 12 : func_node = parse_function_call<12>(function,function_name); break; case 13 : func_node = parse_function_call<13>(function,function_name); break; case 14 : func_node = parse_function_call<14>(function,function_name); break; case 15 : func_node = parse_function_call<15>(function,function_name); break; case 16 : func_node = parse_function_call<16>(function,function_name); break; case 17 : func_node = parse_function_call<17>(function,function_name); break; case 18 : func_node = parse_function_call<18>(function,function_name); break; case 19 : func_node = parse_function_call<19>(function,function_name); break; case 20 : func_node = parse_function_call<20>(function,function_name); break; default : { set_error( make_error(parser_error::e_syntax, current_token(), "ERR014 - Invalid number of parameters for function: '" + function_name + "'", exprtk_error_location)); return error_node(); } } if (func_node) return func_node; else { set_error( make_error(parser_error::e_syntax, current_token(), "ERR015 - Failed to generate call to function: '" + function_name + "'", exprtk_error_location)); return error_node(); } } template <std::size_t NumberofParameters> inline expression_node_ptr parse_function_call(ifunction<T>* function, const std::string& function_name) { #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable: 4127) #endif if (0 == NumberofParameters) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR016 - Expecting ifunction '" + function_name + "' to have non-zero parameter count", exprtk_error_location)); return 
error_node(); } #ifdef _MSC_VER #pragma warning(pop) #endif expression_node_ptr branch[NumberofParameters]; expression_node_ptr result = error_node(); std::fill_n(branch, NumberofParameters, reinterpret_cast<expression_node_ptr>(0)); scoped_delete<expression_node_t,NumberofParameters> sd((*this),branch); next_token(); if (!token_is(token_t::e_lbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR017 - Expecting argument list for function: '" + function_name + "'", exprtk_error_location)); return error_node(); } for (int i = 0; i < static_cast<int>(NumberofParameters); ++i) { branch[i] = parse_expression(); if (0 == branch[i]) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR018 - Failed to parse argument " + details::to_str(i) + " for function: '" + function_name + "'", exprtk_error_location)); return error_node(); } else if (i < static_cast<int>(NumberofParameters - 1)) { if (!token_is(token_t::e_comma)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR019 - Invalid number of arguments for function: '" + function_name + "'", exprtk_error_location)); return error_node(); } } } if (!token_is(token_t::e_rbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR020 - Invalid number of arguments for function: '" + function_name + "'", exprtk_error_location)); return error_node(); } else result = expression_generator_.function(function,branch); sd.delete_ptr = false; return result; } inline expression_node_ptr parse_function_call_0(ifunction<T>* function, const std::string& function_name) { expression_node_ptr result = expression_generator_.function(function); state_.side_effect_present = function->has_side_effects(); next_token(); if ( token_is(token_t::e_lbracket) && !token_is(token_t::e_rbracket) ) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR021 - Expecting '()' to proceed call to function: '" + function_name + "'", exprtk_error_location)); 
free_node(node_allocator_,result); return error_node(); } else return result; } template <std::size_t MaxNumberofParameters> inline std::size_t parse_base_function_call(expression_node_ptr (¶m_list)[MaxNumberofParameters], const std::string& function_name = "") { std::fill_n(param_list, MaxNumberofParameters, reinterpret_cast<expression_node_ptr>(0)); scoped_delete<expression_node_t,MaxNumberofParameters> sd((*this),param_list); next_token(); if (!token_is(token_t::e_lbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR022 - Expected a '(' at start of function call to '" + function_name + "', instead got: '" + current_token().value + "'", exprtk_error_location)); return 0; } if (token_is(token_t::e_rbracket, e_hold)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR023 - Expected at least one input parameter for function call '" + function_name + "'", exprtk_error_location)); return 0; } std::size_t param_index = 0; for (; param_index < MaxNumberofParameters; ++param_index) { param_list[param_index] = parse_expression(); if (0 == param_list[param_index]) return 0; else if (token_is(token_t::e_rbracket)) { sd.delete_ptr = false; break; } else if (token_is(token_t::e_comma)) continue; else { set_error( make_error(parser_error::e_syntax, current_token(), "ERR024 - Expected a ',' between function input parameters, instead got: '" + current_token().value + "'", exprtk_error_location)); return 0; } } if (sd.delete_ptr) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR025 - Invalid number of input parameters passed to function '" + function_name + "'", exprtk_error_location)); return 0; } return (param_index + 1); } inline expression_node_ptr parse_base_operation() { typedef std::pair<base_ops_map_t::iterator,base_ops_map_t::iterator> map_range_t; const std::string operation_name = current_token().value; const token_t diagnostic_token = current_token(); map_range_t itr_range = 
base_ops_map_.equal_range(operation_name); if (0 == std::distance(itr_range.first,itr_range.second)) { set_error( make_error(parser_error::e_syntax, diagnostic_token, "ERR026 - No entry found for base operation: " + operation_name, exprtk_error_location)); return error_node(); } static const std::size_t MaxNumberofParameters = 4; expression_node_ptr param_list[MaxNumberofParameters] = {0}; const std::size_t parameter_count = parse_base_function_call(param_list, operation_name); if ((parameter_count > 0) && (parameter_count <= MaxNumberofParameters)) { for (base_ops_map_t::iterator itr = itr_range.first; itr != itr_range.second; ++itr) { details::base_operation_t& operation = itr->second; if (operation.num_params == parameter_count) { switch (parameter_count) { #define base_opr_case(N) \ case N : { \ expression_node_ptr pl##N[N] = {0}; \ std::copy(param_list, param_list + N, pl##N); \ lodge_symbol(operation_name, e_st_function); \ return expression_generator_(operation.type, pl##N); \ } \ base_opr_case(1) base_opr_case(2) base_opr_case(3) base_opr_case(4) #undef base_opr_case } } } } for (std::size_t i = 0; i < MaxNumberofParameters; ++i) { free_node(node_allocator_, param_list[i]); } set_error( make_error(parser_error::e_syntax, diagnostic_token, "ERR027 - Invalid number of input parameters for call to function: '" + operation_name + "'", exprtk_error_location)); return error_node(); } inline expression_node_ptr parse_conditional_statement_01(expression_node_ptr condition) { // Parse: [if][(][condition][,][consequent][,][alternative][)] expression_node_ptr consequent = error_node(); expression_node_ptr alternative = error_node(); bool result = true; if (!token_is(token_t::e_comma)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR028 - Expected ',' between if-statement condition and consequent", exprtk_error_location)); result = false; } else if (0 == (consequent = parse_expression())) { set_error( make_error(parser_error::e_syntax, 
current_token(), "ERR029 - Failed to parse consequent for if-statement", exprtk_error_location)); result = false; } else if (!token_is(token_t::e_comma)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR030 - Expected ',' between if-statement consequent and alternative", exprtk_error_location)); result = false; } else if (0 == (alternative = parse_expression())) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR031 - Failed to parse alternative for if-statement", exprtk_error_location)); result = false; } else if (!token_is(token_t::e_rbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR032 - Expected ')' at the end of if-statement", exprtk_error_location)); result = false; } #ifndef exprtk_disable_string_capabilities if (result) { const bool consq_is_str = is_generally_string_node( consequent); const bool alter_is_str = is_generally_string_node(alternative); if (consq_is_str || alter_is_str) { if (consq_is_str && alter_is_str) { return expression_generator_ .conditional_string(condition,consequent,alternative); } set_error( make_error(parser_error::e_syntax, current_token(), "ERR033 - Return types of ternary if-statement differ", exprtk_error_location)); result = false; } } #endif if (!result) { free_node(node_allocator_, condition); free_node(node_allocator_, consequent); free_node(node_allocator_,alternative); return error_node(); } else return expression_generator_ .conditional(condition,consequent,alternative); } inline expression_node_ptr parse_conditional_statement_02(expression_node_ptr condition) { expression_node_ptr consequent = error_node(); expression_node_ptr alternative = error_node(); bool result = true; if (token_is(token_t::e_lcrlbracket,prsrhlpr_t::e_hold)) { if (0 == (consequent = parse_multi_sequence("if-statement-01"))) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR034 - Failed to parse body of consequent for if-statement", exprtk_error_location)); result 
= false; } } else { if ( settings_.commutative_check_enabled() && token_is(token_t::e_mul,prsrhlpr_t::e_hold) ) { next_token(); } if (0 != (consequent = parse_expression())) { if (!token_is(token_t::e_eof)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR035 - Expected ';' at the end of the consequent for if-statement", exprtk_error_location)); result = false; } } else { set_error( make_error(parser_error::e_syntax, current_token(), "ERR036 - Failed to parse body of consequent for if-statement", exprtk_error_location)); result = false; } } if (result) { if (details::imatch(current_token().value,"else")) { next_token(); if (token_is(token_t::e_lcrlbracket,prsrhlpr_t::e_hold)) { if (0 == (alternative = parse_multi_sequence("else-statement-01"))) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR037 - Failed to parse body of the 'else' for if-statement", exprtk_error_location)); result = false; } } else if (details::imatch(current_token().value,"if")) { if (0 == (alternative = parse_conditional_statement())) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR038 - Failed to parse body of if-else statement", exprtk_error_location)); result = false; } } else if (0 != (alternative = parse_expression())) { if (!token_is(token_t::e_eof)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR039 - Expected ';' at the end of the 'else-if' for the if-statement", exprtk_error_location)); result = false; } } else { set_error( make_error(parser_error::e_syntax, current_token(), "ERR040 - Failed to parse body of the 'else' for if-statement", exprtk_error_location)); result = false; } } } #ifndef exprtk_disable_string_capabilities if (result) { const bool consq_is_str = is_generally_string_node( consequent); const bool alter_is_str = is_generally_string_node(alternative); if (consq_is_str || alter_is_str) { if (consq_is_str && alter_is_str) { return expression_generator_ .conditional_string(condition, 
consequent, alternative); } set_error( make_error(parser_error::e_syntax, current_token(), "ERR041 - Return types of ternary if-statement differ", exprtk_error_location)); result = false; } } #endif if (!result) { free_node(node_allocator_, condition); free_node(node_allocator_, consequent); free_node(node_allocator_, alternative); return error_node(); } else return expression_generator_ .conditional(condition, consequent, alternative); } inline expression_node_ptr parse_conditional_statement() { expression_node_ptr condition = error_node(); next_token(); if (!token_is(token_t::e_lbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR042 - Expected '(' at start of if-statement, instead got: '" + current_token().value + "'", exprtk_error_location)); return error_node(); } else if (0 == (condition = parse_expression())) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR043 - Failed to parse condition for if-statement", exprtk_error_location)); return error_node(); } else if (token_is(token_t::e_comma,prsrhlpr_t::e_hold)) { // if (x,y,z) return parse_conditional_statement_01(condition); } else if (token_is(token_t::e_rbracket)) { // 00. if (x) y; // 01. if (x) y; else z; // 02. if (x) y; else {z0; ... zn;} // 03. if (x) y; else if (z) w; // 04. if (x) y; else if (z) w; else u; // 05. if (x) y; else if (z) w; else {u0; ... un;} // 06. if (x) y; else if (z) {w0; ... wn;} // 07. if (x) {y0; ... yn;} // 08. if (x) {y0; ... yn;} else z; // 09. if (x) {y0; ... yn;} else {z0; ... zn;}; // 10. if (x) {y0; ... yn;} else if (z) w; // 11. if (x) {y0; ... yn;} else if (z) w; else u; // 12. if (x) {y0; ... nex;} else if (z) w; else {u0 ... un;} // 13. if (x) {y0; ... yn;} else if (z) {w0; ... 
wn;} return parse_conditional_statement_02(condition); } set_error( make_error(parser_error::e_syntax, current_token(), "ERR044 - Invalid if-statement", exprtk_error_location)); free_node(node_allocator_,condition); return error_node(); } inline expression_node_ptr parse_ternary_conditional_statement(expression_node_ptr condition) { // Parse: [condition][?][consequent][:][alternative] expression_node_ptr consequent = error_node(); expression_node_ptr alternative = error_node(); bool result = true; if (0 == condition) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR045 - Encountered invalid condition branch for ternary if-statement", exprtk_error_location)); return error_node(); } else if (!token_is(token_t::e_ternary)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR046 - Expected '?' after condition of ternary if-statement", exprtk_error_location)); result = false; } else if (0 == (consequent = parse_expression())) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR047 - Failed to parse consequent for ternary if-statement", exprtk_error_location)); result = false; } else if (!token_is(token_t::e_colon)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR048 - Expected ':' between ternary if-statement consequent and alternative", exprtk_error_location)); result = false; } else if (0 == (alternative = parse_expression())) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR049 - Failed to parse alternative for ternary if-statement", exprtk_error_location)); result = false; } #ifndef exprtk_disable_string_capabilities if (result) { const bool consq_is_str = is_generally_string_node( consequent); const bool alter_is_str = is_generally_string_node(alternative); if (consq_is_str || alter_is_str) { if (consq_is_str && alter_is_str) { return expression_generator_ .conditional_string(condition, consequent, alternative); } set_error( make_error(parser_error::e_syntax, 
current_token(), "ERR050 - Return types of ternary if-statement differ", exprtk_error_location)); result = false; } } #endif if (!result) { free_node(node_allocator_, condition); free_node(node_allocator_, consequent); free_node(node_allocator_, alternative); return error_node(); } else return expression_generator_ .conditional(condition, consequent, alternative); } inline expression_node_ptr parse_while_loop() { // Parse: [while][(][test expr][)][{][expression][}] expression_node_ptr condition = error_node(); expression_node_ptr branch = error_node(); expression_node_ptr result_node = error_node(); bool result = true; next_token(); if (!token_is(token_t::e_lbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR051 - Expected '(' at start of while-loop condition statement", exprtk_error_location)); return error_node(); } else if (0 == (condition = parse_expression())) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR052 - Failed to parse condition for while-loop", exprtk_error_location)); return error_node(); } else if (!token_is(token_t::e_rbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR053 - Expected ')' at end of while-loop condition statement", exprtk_error_location)); result = false; } brkcnt_list_.push_front(false); if (result) { if (0 == (branch = parse_multi_sequence("while-loop"))) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR054 - Failed to parse body of while-loop")); result = false; } else if (0 == (result_node = expression_generator_.while_loop(condition, branch, brkcnt_list_.front()))) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR055 - Failed to synthesize while-loop", exprtk_error_location)); result = false; } } if (!result) { free_node(node_allocator_, branch); free_node(node_allocator_, condition); free_node(node_allocator_, result_node); brkcnt_list_.pop_front(); return error_node(); } else return result_node; } inline 
expression_node_ptr parse_repeat_until_loop() { // Parse: [repeat][{][expression][}][until][(][test expr][)] expression_node_ptr condition = error_node(); expression_node_ptr branch = error_node(); next_token(); std::vector<expression_node_ptr> arg_list; std::vector<bool> side_effect_list; scoped_vec_delete<expression_node_t> sdd((*this),arg_list); brkcnt_list_.push_front(false); if (details::imatch(current_token().value,"until")) { next_token(); branch = node_allocator_.allocate<details::null_node<T> >(); } else { token_t::token_type seperator = token_t::e_eof; scope_handler sh(*this); scoped_bool_or_restorer sbr(state_.side_effect_present); for ( ; ; ) { state_.side_effect_present = false; expression_node_ptr arg = parse_expression(); if (0 == arg) return error_node(); else { arg_list.push_back(arg); side_effect_list.push_back(state_.side_effect_present); } if (details::imatch(current_token().value,"until")) { next_token(); break; } bool is_next_until = peek_token_is(token_t::e_symbol) && peek_token_is("until"); if (!token_is(seperator) && is_next_until) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR056 - Expected '" + token_t::to_str(seperator) + "' in body of repeat until loop", exprtk_error_location)); return error_node(); } if (details::imatch(current_token().value,"until")) { next_token(); break; } } branch = simplify(arg_list,side_effect_list); sdd.delete_ptr = (0 == branch); if (sdd.delete_ptr) { brkcnt_list_.pop_front(); set_error( make_error(parser_error::e_syntax, current_token(), "ERR057 - Failed to parse body of repeat until loop", exprtk_error_location)); return error_node(); } } if (!token_is(token_t::e_lbracket)) { brkcnt_list_.pop_front(); set_error( make_error(parser_error::e_syntax, current_token(), "ERR058 - Expected '(' before condition statement of repeat until loop", exprtk_error_location)); free_node(node_allocator_,branch); return error_node(); } else if (0 == (condition = parse_expression())) { 
brkcnt_list_.pop_front(); set_error( make_error(parser_error::e_syntax, current_token(), "ERR059 - Failed to parse condition for repeat until loop", exprtk_error_location)); free_node(node_allocator_,branch); return error_node(); } else if (!token_is(token_t::e_rbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR060 - Expected ')' after condition of repeat until loop", exprtk_error_location)); free_node(node_allocator_, branch); free_node(node_allocator_, condition); brkcnt_list_.pop_front(); return error_node(); } expression_node_ptr result; result = expression_generator_ .repeat_until_loop(condition, branch, brkcnt_list_.front()); if (0 == result) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR061 - Failed to synthesize repeat until loop", exprtk_error_location)); free_node(node_allocator_,condition); brkcnt_list_.pop_front(); return error_node(); } else { brkcnt_list_.pop_front(); return result; } } inline expression_node_ptr parse_for_loop() { expression_node_ptr initialiser = error_node(); expression_node_ptr condition = error_node(); expression_node_ptr incrementor = error_node(); expression_node_ptr loop_body = error_node(); scope_element* se = 0; bool result = true; std::string loop_counter_symbol; next_token(); scope_handler sh(*this); if (!token_is(token_t::e_lbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR062 - Expected '(' at start of for-loop", exprtk_error_location)); return error_node(); } if (!token_is(token_t::e_eof)) { if ( !token_is(token_t::e_symbol,prsrhlpr_t::e_hold) && details::imatch(current_token().value,"var") ) { next_token(); if (!token_is(token_t::e_symbol,prsrhlpr_t::e_hold)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR063 - Expected a variable at the start of initialiser section of for-loop", exprtk_error_location)); return error_node(); } else if (!peek_token_is(token_t::e_assign)) { set_error( make_error(parser_error::e_syntax, 
current_token(),
                          "ERR064 - Expected variable assignment of initialiser section of for-loop",
                          exprtk_error_location));

            return error_node();
         }

         loop_counter_symbol = current_token().value;

         /* Look up the loop counter in the scope element manager (SEM). */
         se = &sem_.get_element(loop_counter_symbol);

         /* An already-active scope element of the same name means the
            for-loop variable would shadow an existing declaration. */
         if ((se->name == loop_counter_symbol) && se->active)
         {
            set_error(
               make_error(parser_error::e_syntax,
                          current_token(),
                          "ERR065 - For-loop variable '" + loop_counter_symbol+ "' is being shadowed by a previous declaration",
                          exprtk_error_location));

            return error_node();
         }
         else if (!symtab_store_.is_variable(loop_counter_symbol))
         {
            /* Reactivate an inactive scope variable of the same name,
               otherwise create and register a brand new local variable
               to act as the loop counter. */
            if (
                 !se->active &&
                 (se->name == loop_counter_symbol) &&
                 (se->type == scope_element::e_variable)
               )
            {
               se->active = true;
               se->ref_count++;
            }
            else
            {
               scope_element nse;
               nse.name = loop_counter_symbol;
               nse.active = true;
               nse.ref_count = 1;
               nse.type = scope_element::e_variable;
               nse.depth = state_.scope_depth;
               /* Backing storage for the counter; freed via the SEM, not here. */
               nse.data = new T(T(0));
               nse.var_node = node_allocator_.allocate<variable_node_t>(*(T*)(nse.data));

               if (!sem_.add_element(nse))
               {
                  set_error(
                     make_error(parser_error::e_syntax,
                                current_token(),
                                "ERR066 - Failed to add new local variable '" + loop_counter_symbol + "' to SEM",
                                exprtk_error_location));

                  sem_.free_element(nse);

                  result = false;
               }
               else
               {
                  exprtk_debug(("parse_for_loop() - INFO - Added new local variable: %s\n",nse.name.c_str()));

                  state_.activate_side_effect("parse_for_loop()");
               }
            }
         }
      }

      /* Initialiser section: expression followed by ';' (lexed as e_eof). */
      if (0 == (initialiser = parse_expression()))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR067 - Failed to parse initialiser of for-loop",
                       exprtk_error_location));

         result = false;
      }
      else if (!token_is(token_t::e_eof))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR068 - Expected ';' after initialiser of for-loop",
                       exprtk_error_location));

         result = false;
      }
   }

   /* Condition section (may be empty, i.e. immediately ';'). */
   if (!token_is(token_t::e_eof))
   {
      if (0 == (condition = parse_expression()))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR069 - Failed to parse condition of for-loop",
                       exprtk_error_location));

         result = false;
      }
      else if (!token_is(token_t::e_eof))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR070 - Expected ';' after condition section of for-loop",
                       exprtk_error_location));

         result = false;
      }
   }

   /* Incrementor section (may be empty, i.e. immediately ')'). */
   if (!token_is(token_t::e_rbracket))
   {
      if (0 == (incrementor = parse_expression()))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR071 - Failed to parse incrementor of for-loop",
                       exprtk_error_location));

         result = false;
      }
      else if (!token_is(token_t::e_rbracket))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR072 - Expected ')' after incrementor section of for-loop",
                       exprtk_error_location));

         result = false;
      }
   }

   /* Body: push a break/continue tracking frame, then parse the loop body. */
   if (result)
   {
      brkcnt_list_.push_front(false);

      if (0 == (loop_body = parse_multi_sequence("for-loop")))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR073 - Failed to parse body of for-loop",
                       exprtk_error_location));

         result = false;
      }
   }

   /* On any failure: drop the loop variable reference, scrub the scope
      and free every partially constructed subtree before bailing out. */
   if (!result)
   {
      if (se)
      {
         se->ref_count--;
      }

      sem_.cleanup();

      free_node(node_allocator_, initialiser);
      free_node(node_allocator_, condition);
      free_node(node_allocator_, incrementor);
      free_node(node_allocator_, loop_body);

      if (!brkcnt_list_.empty())
      {
         brkcnt_list_.pop_front();
      }

      return error_node();
   }
   else
   {
      expression_node_ptr result_node = expression_generator_.for_loop(initialiser, condition, incrementor, loop_body, brkcnt_list_.front());
      brkcnt_list_.pop_front();
      return result_node;
   }
}

/* Parse a 'switch { case <cond> : <expr>; ... default : <expr>; }'
   construct. Condition/consequent pairs (and finally the default
   statement) are collected into arg_list and handed to the expression
   generator. Cases whose condition is a constant-false are optimised
   away at parse time. Returns the switch node, or error_node() with a
   diagnostic (ERR074..ERR081) on failure; svd frees collected nodes if
   node generation fails. */
inline expression_node_ptr parse_switch_statement()
{
   std::vector<expression_node_ptr> arg_list;
   expression_node_ptr result = error_node();

   if (!details::imatch(current_token().value,"switch"))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR074 - Expected keyword 'switch'",
                    exprtk_error_location));

      return error_node();
   }

   scoped_vec_delete<expression_node_t> svd((*this),arg_list);

   next_token();

   if (!token_is(token_t::e_lcrlbracket))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR075 - Expected '{' for call to switch statement",
                    exprtk_error_location));

      return error_node();
   }

   for ( ; ; )
   {
      if (!details::imatch("case",current_token().value))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR076 - Expected either a 'case' or 'default' statement",
                       exprtk_error_location));

         return error_node();
      }

      next_token();

      expression_node_ptr condition = parse_expression();

      if (0 == condition)
         return error_node();
      else if (!token_is(token_t::e_colon))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR077 - Expected ':' for case of switch statement",
                       exprtk_error_location));

         return error_node();
      }

      expression_node_ptr consequent = parse_expression();

      if (0 == consequent)
         return error_node();
      else if (!token_is(token_t::e_eof))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR078 - Expected ';' at end of case for switch statement",
                       exprtk_error_location));

         return error_node();
      }

      /* Can we optimise away the case statement? */
      if (is_constant_node(condition) && is_false(condition))
      {
         free_node(node_allocator_, condition);
         free_node(node_allocator_, consequent);
      }
      else
      {
         arg_list.push_back( condition);
         arg_list.push_back(consequent);
      }

      if (details::imatch("default",current_token().value))
      {
         next_token();

         if (!token_is(token_t::e_colon))
         {
            set_error(
               make_error(parser_error::e_syntax,
                          current_token(),
                          "ERR079 - Expected ':' for default of switch statement",
                          exprtk_error_location));

            return error_node();
         }

         expression_node_ptr default_statement = error_node();

         /* The default branch may be a braced multi-statement sequence
            or a single expression. */
         if (token_is(token_t::e_lcrlbracket,prsrhlpr_t::e_hold))
            default_statement = parse_multi_sequence("switch-default");
         else
            default_statement = parse_expression();

         if (0 == default_statement)
            return error_node();
         else if (!token_is(token_t::e_eof))
         {
            free_node(node_allocator_,default_statement);

            set_error(
               make_error(parser_error::e_syntax,
                          current_token(),
                          "ERR080 - Expected ';' at end of default for switch statement",
                          exprtk_error_location));

            return error_node();
         }

         arg_list.push_back(default_statement);
         break;
      }
   }

   if (!token_is(token_t::e_rcrlbracket))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR081 - Expected '}' at end of switch statement",
                    exprtk_error_location));

      return error_node();
   }

   result = expression_generator_.switch_statement(arg_list);

   /* arg_list ownership passed to the node on success; otherwise
      svd frees the collected subtrees. */
   svd.delete_ptr = (0 == result);

   return result;
}

/* Parse a '[*] { case <cond> : <expr>; ... }' multi-switch: every case
   whose condition holds is evaluated (no default branch). Diagnostics
   ERR082..ERR087. */
inline expression_node_ptr parse_multi_switch_statement()
{
   std::vector<expression_node_ptr> arg_list;
   expression_node_ptr result = error_node();

   if (!details::imatch(current_token().value,"[*]"))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR082 - Expected token '[*]'",
                    exprtk_error_location));

      return error_node();
   }

   scoped_vec_delete<expression_node_t> svd((*this),arg_list);

   next_token();

   if (!token_is(token_t::e_lcrlbracket))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR083 - Expected '{' for call to [*] statement",
                    exprtk_error_location));

      return error_node();
   }

   for ( ; ; )
   {
      if (!details::imatch("case",current_token().value))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR084 - Expected a 'case' statement for multi-switch",
                       exprtk_error_location));

         return error_node();
      }

      next_token();

      expression_node_ptr condition = parse_expression();

      if (0 == condition)
         return error_node();

      if (!token_is(token_t::e_colon))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR085 - Expected ':' for case of [*] statement",
                       exprtk_error_location));

         return error_node();
      }

      expression_node_ptr consequent = parse_expression();

      if (0 == consequent)
         return error_node();

      if (!token_is(token_t::e_eof))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR086 - Expected ';' at end of case for [*] statement",
                       exprtk_error_location));

         return error_node();
      }

      /* Can we optimise away the case statement? */
      if (is_constant_node(condition) && is_false(condition))
      {
         /* Constant-false case: discard both subtrees at parse time. */
         free_node(node_allocator_, condition);
         free_node(node_allocator_, consequent);
      }
      else
      {
         arg_list.push_back(condition);
         arg_list.push_back(consequent);
      }

      if (token_is(token_t::e_rcrlbracket,prsrhlpr_t::e_hold))
      {
         break;
      }
   }

   if (!token_is(token_t::e_rcrlbracket))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR087 - Expected '}' at end of [*] statement",
                    exprtk_error_location));

      return error_node();
   }

   result = expression_generator_.multi_switch_statement(arg_list);

   /* svd frees the collected subtrees only if node generation failed. */
   svd.delete_ptr = (0 == result);

   return result;
}

/* Parse a variadic builtin invocation. '~' introduces a multi-sequence,
   '[*]' a multi-switch; otherwise the symbol is mapped to one of the
   aggregate operations (avg/mand/max/min/mor/mul/sum) and a
   comma-separated '(arg,...)' list is consumed. Diagnostics
   ERR088..ERR090. */
inline expression_node_ptr parse_vararg_function()
{
   std::vector<expression_node_ptr> arg_list;
   expression_node_ptr result = error_node();

   details::operator_type opt_type = details::e_default;
   const std::string symbol = current_token().value;

   if (details::imatch(symbol,"~"))
   {
      next_token();
      return parse_multi_sequence();
   }
   else if (details::imatch(symbol,"[*]"))
   {
      return parse_multi_switch_statement();
   }
   else if (details::imatch(symbol, "avg" )) opt_type = details::e_avg ;
   else if (details::imatch(symbol, "mand")) opt_type = details::e_mand;
   else if (details::imatch(symbol, "max" )) opt_type = details::e_max ;
   else if (details::imatch(symbol, "min" )) opt_type = details::e_min ;
   else if (details::imatch(symbol, "mor" )) opt_type = details::e_mor ;
   else if (details::imatch(symbol, "mul" )) opt_type = details::e_prod;
   else if (details::imatch(symbol, "sum" )) opt_type = details::e_sum ;
   else
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR088 - Unsupported vararg function: " + symbol,
                    exprtk_error_location));

      return error_node();
   }

   scoped_vec_delete<expression_node_t> sdd((*this),arg_list);

   lodge_symbol(symbol,e_st_function);

   next_token();

   if (!token_is(token_t::e_lbracket))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR089 - Expected '(' for call to vararg function: " + symbol,
                    exprtk_error_location));

      return error_node();
   }

   for ( ; ; )
   {
      expression_node_ptr arg = parse_expression();

      if (0 == arg)
         return error_node();
      else
         arg_list.push_back(arg);

      if (token_is(token_t::e_rbracket))
         break;
      else if (!token_is(token_t::e_comma))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR090 - Expected ',' for call to vararg function: " + symbol,
                       exprtk_error_location));

         return error_node();
      }
   }

   result = expression_generator_.vararg_function(opt_type,arg_list);

   sdd.delete_ptr = (0 == result);

   return result;
}

#ifndef exprtk_disable_string_capabilities
/* Parse one '[..]' range applied to a string expression. A bare '[]'
   yields a string-size node; otherwise a range is parsed and a range
   node generated. On any failure the incoming expression is freed and
   error_node() returned (ERR091/ERR092). */
inline expression_node_ptr parse_string_range_statement(expression_node_ptr& expression)
{
   if (!token_is(token_t::e_lsqrbracket))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR091 - Expected '[' as start of string range definition",
                    exprtk_error_location));

      free_node(node_allocator_,expression);

      return error_node();
   }
   else if (token_is(token_t::e_rsqrbracket))
   {
      return node_allocator_.allocate<details::string_size_node<T> >(expression);
   }

   range_t rp;

   if (!parse_range(rp,true))
   {
      free_node(node_allocator_,expression);

      return error_node();
   }

   expression_node_ptr result = expression_generator_(expression,rp);

   if (0 == result)
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR092 - Failed to generate string range node",
                    exprtk_error_location));

      free_node(node_allocator_,expression);
   }

   /* Ownership of the range subexpressions has moved into the node. */
   rp.clear();

   return result;
}
#else
inline expression_node_ptr parse_string_range_statement(expression_node_ptr&)
{
   return error_node();
}
#endif

/* Greedily consume chained range/size accesses on a string expression,
   e.g. s[1:3][0]. */
inline void parse_pending_string_rangesize(expression_node_ptr& expression)
{
   /* Allow no more than 100 range calls, eg: s[][][]...[][] */
   const std::size_t max_rangesize_parses = 100;

   std::size_t i = 0;

   while (
           (0 != expression) &&
           (i++ < max_rangesize_parses) &&
           error_list_.empty() &&
           is_generally_string_node(expression) &&
           token_is(token_t::e_lsqrbracket,prsrhlpr_t::e_hold)
         )
   {
      expression = parse_string_range_statement(expression);
   }
}

/* Reduce a parsed statement sequence to a single expression node.
   Drops side-effect free / constant / null / bare-variable
   subexpressions (freeing them), truncates everything after the first
   return/break/continue, and wraps whatever remains in a vararg
   multi (or string-multi) node. Activates the side-effect state when
   the sequence cannot be proven pure. */
template <typename Allocator1,
          typename Allocator2,
          template <typename,typename> class Sequence>
inline expression_node_ptr simplify(Sequence<expression_node_ptr,Allocator1>& expression_list,
                                    Sequence<bool,Allocator2>& side_effect_list,
                                    const bool specialise_on_final_type = false)
{
   if (expression_list.empty())
      return error_node();
   else if (1 == expression_list.size())
      return expression_list[0];

   Sequence<expression_node_ptr,Allocator1> tmp_expression_list;

   bool return_node_present = false;

   for (std::size_t i = 0; i < (expression_list.size() - 1); ++i)
   {
      if (is_variable_node(expression_list[i]))
         continue;
      else if (
                is_return_node (expression_list[i]) ||
                is_break_node (expression_list[i]) ||
                is_continue_node(expression_list[i])
              )
      {
         tmp_expression_list.push_back(expression_list[i]);

         /* Remove all subexpressions after first short-circuit
            node has been encountered. */

         for (std::size_t j = i + 1; j < expression_list.size(); ++j)
         {
            free_node(node_allocator_,expression_list[j]);
         }

         return_node_present = true;

         break;
      }
      else if (
                is_constant_node(expression_list[i]) ||
                is_null_node (expression_list[i]) ||
                !side_effect_list[i]
              )
      {
         free_node(node_allocator_,expression_list[i]);
         continue;
      }
      else
         tmp_expression_list.push_back(expression_list[i]);
   }

   if (!return_node_present)
   {
      tmp_expression_list.push_back(expression_list.back());
   }

   expression_list.swap(tmp_expression_list);

   /* NOTE(review): after the swap, tmp_expression_list holds the
      original (larger) list, so this logs the reduction in count. */
   if (tmp_expression_list.size() > expression_list.size())
   {
      exprtk_debug(("simplify() - Reduced subexpressions from %d to %d\n",
                    static_cast<int>(tmp_expression_list.size()),
                    static_cast<int>(expression_list .size())));
   }

   if (
        return_node_present ||
        side_effect_list.back() ||
        (expression_list.size() > 1)
      )
      state_.activate_side_effect("simplify()");

   if (1 == expression_list.size())
      return expression_list[0];
   else if (specialise_on_final_type && is_generally_string_node(expression_list.back()))
      return expression_generator_.vararg_function(details::e_smulti,expression_list);
   else
      return expression_generator_.vararg_function(details::e_multi,expression_list);
}
/* Parse a statement sequence: either '{ s0; s1; ... }' or '(e0, e1, ...)'.
   Each sub-expression's side-effect status is tracked and the whole
   sequence is folded through simplify(). An empty '{}' yields a null
   node. Diagnostics ERR093/ERR094. */
inline expression_node_ptr parse_multi_sequence(const std::string& source = "")
{
   /* Default to '{...}' with ';' (e_eof) separators; '(...)' switches
      to ')' terminator with ',' separators. */
   token_t::token_type close_bracket = token_t::e_rcrlbracket;
   token_t::token_type seperator = token_t::e_eof;

   if (!token_is(token_t::e_lcrlbracket))
   {
      if (token_is(token_t::e_lbracket))
      {
         close_bracket = token_t::e_rbracket;
         seperator = token_t::e_comma;
      }
      else
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR093 - Expected '" + token_t::to_str(close_bracket) + "' for call to multi-sequence" + ((!source.empty()) ? std::string(" section of " + source): ""),
                       exprtk_error_location));

         return error_node();
      }
   }
   else if (token_is(token_t::e_rcrlbracket))
   {
      return node_allocator_.allocate<details::null_node<T> >();
   }

   std::vector<expression_node_ptr> arg_list;
   std::vector<bool> side_effect_list;

   expression_node_ptr result = error_node();

   scoped_vec_delete<expression_node_t> sdd((*this),arg_list);

   /* New lexical scope for the duration of the sequence; side-effect
      flag is OR-restored on exit. */
   scope_handler sh(*this);

   scoped_bool_or_restorer sbr(state_.side_effect_present);

   for ( ; ; )
   {
      state_.side_effect_present = false;

      expression_node_ptr arg = parse_expression();

      if (0 == arg)
         return error_node();
      else
      {
         arg_list.push_back(arg);
         side_effect_list.push_back(state_.side_effect_present);
      }

      if (token_is(close_bracket))
         break;

      bool is_next_close = peek_token_is(close_bracket);

      if (!token_is(seperator) && is_next_close)
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR094 - Expected '" + details::to_str(seperator) + "' for call to multi-sequence section of " + source,
                       exprtk_error_location));

         return error_node();
      }

      if (token_is(close_bracket))
         break;
   }

   result = simplify(arg_list,side_effect_list,source.empty());

   sdd.delete_ptr = (0 == result);

   return result;
}

/* Parse an index range '[r0:r1]' into rp. Either bound may be omitted
   (0 / end respectively) or be an arbitrary expression; constant bounds
   are folded and validated (must be >= 0, and r0 <= r1 when both
   constant). Returns false with a diagnostic (ERR095..ERR102) on
   failure, freeing any partially captured bound expressions. */
inline bool parse_range(range_t& rp, const bool skip_lsqr = false)
{
   /* Examples of valid ranges:
      1. [1:5]     -> 1..5
      2. [ :5]     -> 0..5
      3. [1: ]     -> 1..end
      4. [x:y]     -> x..y where x <= y
      5. [x+1:y/2] -> x+1..y/2 where x+1 <= y/2
      6. [ :y]     -> 0..y where 0 <= y
      7. [x: ]     -> x..end where x <= end */

   rp.clear();

   if (!skip_lsqr && !token_is(token_t::e_lsqrbracket))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR095 - Expected '[' for start of range",
                    exprtk_error_location));

      return false;
   }

   if (token_is(token_t::e_colon))
   {
      /* Empty lower bound: constant zero. */
      rp.n0_c.first = true;
      rp.n0_c.second = 0;
      rp.cache.first = 0;
   }
   else
   {
      expression_node_ptr r0 = parse_expression();

      if (0 == r0)
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR096 - Failed parse begin section of range",
                       exprtk_error_location));

         return false;
      }
      else if (is_constant_node(r0))
      {
         const T r0_value = r0->value();

         if (r0_value >= T(0))
         {
            rp.n0_c.first = true;
            rp.n0_c.second = static_cast<std::size_t>(details::numeric::to_int64(r0_value));
            rp.cache.first = rp.n0_c.second;
         }

         free_node(node_allocator_,r0);

         if (r0_value < T(0))
         {
            set_error(
               make_error(parser_error::e_syntax,
                          current_token(),
                          "ERR097 - Range lower bound less than zero! Constraint: r0 >= 0",
                          exprtk_error_location));

            return false;
         }
      }
      else
      {
         /* Non-constant lower bound: keep the expression for runtime
            evaluation. */
         rp.n0_e.first = true;
         rp.n0_e.second = r0;
      }

      if (!token_is(token_t::e_colon))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR098 - Expected ':' for break in range",
                       exprtk_error_location));

         rp.free();

         return false;
      }
   }

   if (token_is(token_t::e_rsqrbracket))
   {
      /* Empty upper bound: sentinel meaning 'end of sequence'. */
      rp.n1_c.first = true;
      rp.n1_c.second = std::numeric_limits<std::size_t>::max();
   }
   else
   {
      expression_node_ptr r1 = parse_expression();

      if (0 == r1)
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR099 - Failed parse end section of range",
                       exprtk_error_location));

         rp.free();

         return false;
      }
      else if (is_constant_node(r1))
      {
         const T r1_value = r1->value();

         if (r1_value >= T(0))
         {
            rp.n1_c.first = true;
            rp.n1_c.second = static_cast<std::size_t>(details::numeric::to_int64(r1_value));
            rp.cache.second = rp.n1_c.second;
         }

         free_node(node_allocator_,r1);

         if (r1_value < T(0))
         {
            set_error(
               make_error(parser_error::e_syntax,
                          current_token(),
                          "ERR100 - Range upper bound less than zero! Constraint: r1 >= 0",
                          exprtk_error_location));

            return false;
         }
      }
      else
      {
         rp.n1_e.first = true;
         rp.n1_e.second = r1;
      }

      if (!token_is(token_t::e_rsqrbracket))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR101 - Expected ']' for start of range",
                       exprtk_error_location));

         rp.free();

         return false;
      }
   }

   /* Both bounds constant: validate the ordering now. */
   if (rp.const_range())
   {
      std::size_t r0 = 0;
      std::size_t r1 = 0;

      const bool rp_result = rp(r0,r1);

      if (!rp_result || (r0 > r1))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR102 - Invalid range, Constraint: r0 <= r1",
                       exprtk_error_location));

         return false;
      }
   }

   return true;
}

/* Record a referenced symbol with the dependent-entity collector. */
inline void lodge_symbol(const std::string& symbol, const symbol_type st)
{
   dec_.add_symbol(symbol,st);
}

#ifndef exprtk_disable_string_capabilities
/* Parse a reference to a string variable (local scope first, then the
   symbol table), folding constant strings into literal nodes, and then
   handle an optional trailing '[]' (size) or '[r0:r1]' (range) access.
   Diagnostic ERR103. */
inline expression_node_ptr parse_string()
{
   const std::string symbol = current_token().value;

   typedef details::stringvar_node<T>* strvar_node_t;

   expression_node_ptr result = error_node();
   strvar_node_t const_str_node = static_cast<strvar_node_t>(0);

   scope_element& se = sem_.get_active_element(symbol);

   if (scope_element::e_string == se.type)
   {
      se.active = true;
      result = se.str_node;
      lodge_symbol(symbol,e_st_local_string);
   }
   else
   {
      if (!symtab_store_.is_conststr_stringvar(symbol))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR103 - Unknown string symbol",
                       exprtk_error_location));

         return error_node();
      }

      result = symtab_store_.get_stringvar(symbol);

      if (symtab_store_.is_constant_string(symbol))
      {
         const_str_node = static_cast<strvar_node_t>(result);
         result = expression_generator_(const_str_node->str());
      }

      lodge_symbol(symbol,e_st_string);
   }

   if (peek_token_is(token_t::e_lsqrbracket))
   {
      next_token();

      if (peek_token_is(token_t::e_rsqrbracket))
      {
         /* Bare '[]' => string size. */
         next_token();
         next_token();

         if (const_str_node)
         {
            free_node(node_allocator_,result);

            return expression_generator_(T(const_str_node->size()));
         }
         else
            return node_allocator_.allocate<details::stringvar_size_node<T> >
                      (static_cast<details::stringvar_node<T>*>(result)->ref());
      }

      range_t rp;

      if (!parse_range(rp))
      {
         free_node(node_allocator_,result);

         return error_node();
      }
      else if (const_str_node)
      {
         free_node(node_allocator_,result);
         result = expression_generator_(const_str_node->ref(),rp);
      }
      else
         result = expression_generator_(static_cast<details::stringvar_node<T>*> (result)->ref(), rp);

      /* Range subexpressions now owned by the generated node. */
      if (result)
         rp.clear();
   }
   else
      next_token();

   return result;
}
#else
inline expression_node_ptr parse_string()
{
   return error_node();
}
#endif

#ifndef exprtk_disable_string_capabilities
/* Parse a string literal, folding it into a constant string node, with
   optional trailing '[]' (size) or constant-checked '[r0:r1]' range
   access. Diagnostic ERR104 on a range outside the literal's length. */
inline expression_node_ptr parse_const_string()
{
   const std::string const_str = current_token().value;
   expression_node_ptr result = expression_generator_(const_str);

   if (peek_token_is(token_t::e_lsqrbracket))
   {
      next_token();

      if (peek_token_is(token_t::e_rsqrbracket))
      {
         /* Bare '[]' => literal's length as a constant. */
         next_token();
         next_token();

         free_node(node_allocator_,result);

         return expression_generator_(T(const_str.size()));
      }

      range_t rp;

      if (!parse_range(rp))
      {
         free_node(node_allocator_,result);

         return error_node();
      }

      free_node(node_allocator_,result);

      /* Open-ended upper bound resolves to the last character. */
      if (rp.n1_c.first && (rp.n1_c.second == std::numeric_limits<std::size_t>::max()))
      {
         rp.n1_c.second = const_str.size() - 1;
         rp.cache.second = rp.n1_c.second;
      }

      if (
           (rp.n0_c.first && (rp.n0_c.second >= const_str.size())) ||
           (rp.n1_c.first && (rp.n1_c.second >= const_str.size()))
         )
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR104 - Overflow in range for string: '" + const_str + "'[" +
                       (rp.n0_c.first ? details::to_str(static_cast<int>(rp.n0_c.second)) : "?") + ":" +
                       (rp.n1_c.first ? details::to_str(static_cast<int>(rp.n1_c.second)) : "?") + "]",
                       exprtk_error_location));

         return error_node();
      }

      result = expression_generator_(const_str,rp);

      if (result)
         rp.clear();
   }
   else
      next_token();

   return result;
}
#else
inline expression_node_ptr parse_const_string()
{
   return error_node();
}
#endif

/* Parse a reference to a vector symbol (local scope first, then the
   symbol table) with an optional '[]' (size) or '[index]' element
   access. Constant indices are bounds-checked at parse time.
   Diagnostics ERR105..ERR108. */
inline expression_node_ptr parse_vector()
{
   const std::string symbol = current_token().value;

   vector_holder_ptr vec = vector_holder_ptr(0);

   const scope_element& se = sem_.get_active_element(symbol);

   if (
        !details::imatch(se.name, symbol) ||
        (se.depth > state_.scope_depth) ||
        (scope_element::e_vector != se.type)
      )
   {
      if (0 == (vec = symtab_store_.get_vector(symbol)))
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR105 - Symbol '" + symbol+ " not a vector",
                       exprtk_error_location));

         return error_node();
      }
   }
   else
      vec = se.vec_node;

   expression_node_ptr index_expr = error_node();

   next_token();

   if (!token_is(token_t::e_lsqrbracket))
   {
      /* Bare vector reference. */
      return node_allocator_.allocate<vector_node_t>(vec);
   }
   else if (token_is(token_t::e_rsqrbracket))
   {
      /* '[]' => vector size as a constant. */
      return expression_generator_(T(vec->size()));
   }
   else if (0 == (index_expr = parse_expression()))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR106 - Failed to parse index for vector: '" + symbol + "'",
                    exprtk_error_location));

      return error_node();
   }
   else if (!token_is(token_t::e_rsqrbracket))
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR107 - Expected ']' for index of vector: '" + symbol + "'",
                    exprtk_error_location));

      free_node(node_allocator_,index_expr);

      return error_node();
   }

   /* Perform compile-time range check */
   if (details::is_constant_node(index_expr))
   {
      const std::size_t index = static_cast<std::size_t>(details::numeric::to_int32(index_expr->value()));
      const std::size_t vec_size = vec->size();

      if (index >= vec_size)
      {
         set_error(
            make_error(parser_error::e_syntax,
                       current_token(),
                       "ERR108 - Index of " + details::to_str(index) + " out of range for " "vector '" + symbol + "' of size " + details::to_str(vec_size),
                       exprtk_error_location));

         free_node(node_allocator_,index_expr);

         return error_node();
      }
   }

   return expression_generator_.vector_element(symbol,vec,index_expr);
}

/* Parse a call to a user-registered ivararg_function: consume the
   optional '(arg,...)' list and validate the argument count against
   the function's min/max arity and zero-parameter policy. Diagnostics
   ERR109..ERR113. */
inline expression_node_ptr parse_vararg_function_call(ivararg_function<T>* vararg_function, const std::string& vararg_function_name)
{
   std::vector<expression_node_ptr> arg_list;

   expression_node_ptr result = error_node();

   scoped_vec_delete<expression_node_t> sdd((*this),arg_list);

   next_token();

   if (token_is(token_t::e_lbracket))
   {
      if (token_is(token_t::e_rbracket))
      {
         if (!vararg_function->allow_zero_parameters())
         {
            set_error(
               make_error(parser_error::e_syntax,
                          current_token(),
                          "ERR109 - Zero parameter call to vararg function: " + vararg_function_name + " not allowed",
                          exprtk_error_location));

            return error_node();
         }
      }
      else
      {
         for ( ; ; )
         {
            expression_node_ptr arg = parse_expression();

            if (0 == arg)
               return error_node();
            else
               arg_list.push_back(arg);

            if (token_is(token_t::e_rbracket))
               break;
            else if (!token_is(token_t::e_comma))
            {
               set_error(
                  make_error(parser_error::e_syntax,
                             current_token(),
                             "ERR110 - Expected ',' for call to vararg function: " + vararg_function_name,
                             exprtk_error_location));

               return error_node();
            }
         }
      }
   }
   else if (!vararg_function->allow_zero_parameters())
   {
      /* No '(' at all counts as a zero-parameter invocation. */
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR111 - Zero parameter call to vararg function: " + vararg_function_name + " not allowed",
                    exprtk_error_location));

      return error_node();
   }

   if (arg_list.size() < vararg_function->min_num_args())
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR112 - Invalid number of parameters to call to vararg function: " + vararg_function_name + ", require at least " + details::to_str(static_cast<int>(vararg_function->min_num_args())) + " parameters",
                    exprtk_error_location));

      return error_node();
   }
   else if (arg_list.size() > vararg_function->max_num_args())
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR113 - Invalid number of parameters to call to vararg function: " + vararg_function_name + ", require no more than " + details::to_str(static_cast<int>(vararg_function->max_num_args())) + " parameters",
                    exprtk_error_location));

      return error_node();
   }

   result = expression_generator_.vararg_function_call(vararg_function,arg_list);

   sdd.delete_ptr = (0 == result);

   return result;
}

/* Validates a call's actual parameter-type signature (a string of
   'T'/'S'/'V' codes) against the '|'-separated sequence list a generic
   function declares (wildcards '*' and '?' allowed, 'Z' meaning zero
   parameters). */
class type_checker
{
public:

   typedef parser<T> parser_t;
   typedef std::vector<std::string> param_seq_list_t;

   type_checker(parser_t& p, const std::string& func_name, const std::string& param_seq)
   : invalid_state_(true),
     parser_(p),
     function_name_(func_name)
   {
      split(param_seq);
   }

   /* Match param_seq against every declared sequence; on success
      pseq_index receives the matching sequence's position. On failure
      reports the declared sequence closest to matching (ERR114/ERR115)
      and returns false. An empty declaration list accepts anything. */
   bool verify(const std::string& param_seq, std::size_t& pseq_index)
   {
      if (param_seq_list_.empty())
         return true;

      std::vector<std::pair<std::size_t,char> > error_list;

      for (std::size_t i = 0; i < param_seq_list_.size(); ++i)
      {
         details::char_t diff_value = 0;
         std::size_t diff_index = 0;

         bool result = details::sequence_match(param_seq_list_[i],
                                               param_seq,
                                               diff_index,diff_value);

         if (result)
         {
            pseq_index = i;
            return true;
         }
         else
            error_list.push_back(std::make_pair(diff_index,diff_value));
      }

      if (1 == error_list.size())
      {
         parser_.
            set_error(
               make_error(parser_error::e_syntax,
                          parser_.current_token(),
                          "ERR114 - Failed parameter type check for function '" + function_name_ + "', " "Expected '" + param_seq_list_[0] + "' call set: '" + param_seq +"'",
                          exprtk_error_location));
      }
      else
      {
         /* find first with largest diff_index */
         std::size_t max_diff_index = 0;

         for (std::size_t i = 1; i < error_list.size(); ++i)
         {
            if (error_list[i].first > error_list[max_diff_index].first)
            {
               max_diff_index = i;
            }
         }

         parser_.
            set_error(
               make_error(parser_error::e_syntax,
                          parser_.current_token(),
                          "ERR115 - Failed parameter type check for function '" + function_name_ + "', " "Best match: '" + param_seq_list_[max_diff_index] + "' call set: '" + param_seq +"'",
                          exprtk_error_location));
      }

      return false;
   }

   std::size_t paramseq_count() const
   {
      return param_seq_list_.size();
   }

   std::string paramseq(const std::size_t& index) const
   {
      return param_seq_list_[index];
   }

   /* NOTE(review): returns the NEGATION of invalid_state_, which is
      initialised true and cleared by split() on a bad sequence — so
      invalid() is true only while the state is still 'valid'; callers
      appear to rely on this inverted sense. Confirm against upstream. */
   bool invalid() const
   {
      return !invalid_state_;
   }

   bool allow_zero_parameters() const
   {
      /* 'Z' in the declared sequences marks a zero-parameter overload. */
      return param_seq_list_.end() != std::find(param_seq_list_.begin(),
                                                param_seq_list_.end(),
                                                "Z");
   }

private:

   /* Split the '|'-separated declaration string, validating each piece
      ("Z" or characters from the set STV*?|, with no '?*' or '**'
      runs). On an invalid piece: clear invalid_state_, report
      ERR116/ERR117 and leave param_seq_list_ unpopulated. */
   void split(const std::string& s)
   {
      if (s.empty())
         return;

      std::size_t start = 0;
      std::size_t end = 0;

      param_seq_list_t param_seq_list;

      struct token_validator
      {
         static inline bool process(const std::string& str,
                                    std::size_t s, std::size_t e,
                                    param_seq_list_t& psl)
         {
            if (
                 (e - s) &&
                 (std::string::npos == str.find("?*")) &&
                 (std::string::npos == str.find("**"))
               )
            {
               const std::string curr_str = str.substr(s, e - s);

               if ("Z" == curr_str)
               {
                  psl.push_back(curr_str);
                  return true;
               }
               else if (std::string::npos == curr_str.find_first_not_of("STV*?|"))
               {
                  psl.push_back(curr_str);
                  return true;
               }
            }

            return false;
         }
      };

      while (std::string::npos != (end = s.find('|',start)))
      {
         if (!token_validator::process(s, start, end, param_seq_list))
         {
            invalid_state_ = false;

            const std::string err_param_seq = s.substr(start, end - start);

            parser_.
               set_error(
                  make_error(parser_error::e_syntax,
                             parser_.current_token(),
                             "ERR116 - Invalid parameter sequence of '" + err_param_seq + "' for function: " + function_name_,
                             exprtk_error_location));

            return;
         }
         else
            start = end + 1;
      }

      if (start < s.size())
      {
         if (token_validator::process(s, start, s.size(), param_seq_list))
            param_seq_list_ = param_seq_list;
         else
         {
            const std::string err_param_seq = s.substr(start, s.size() - start);

            parser_.
               set_error(
                  make_error(parser_error::e_syntax,
                             parser_.current_token(),
                             "ERR117 - Invalid parameter sequence of '" + err_param_seq + "' for function: " + function_name_,
                             exprtk_error_location));

            return;
         }
      }
   }

   /* Non-copyable. */
   type_checker(const type_checker&);
   type_checker& operator=(const type_checker&);

   bool invalid_state_;
   parser_t& parser_;
   std::string function_name_;
   param_seq_list_t param_seq_list_;
};

/* Parse a call to a user-registered igeneric_function: consume the
   optional argument list, build the actual parameter-type signature
   ('V'/'S'/'T' per argument) and type-check it against the function's
   declared parameter sequences. Diagnostics ERR118..ERR123. */
inline expression_node_ptr parse_generic_function_call(igeneric_function<T>* function, const std::string& function_name)
{
   std::vector<expression_node_ptr> arg_list;

   scoped_vec_delete<expression_node_t> sdd((*this),arg_list);

   next_token();

   std::string param_type_list;

   type_checker tc((*this), function_name, function->parameter_sequence);

   if (tc.invalid())
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR118 - Type checker instantiation failure for generic function: " + function_name,
                    exprtk_error_location));

      return error_node();
   }

   /* Function claims to allow zero parameters but its declared
      sequences contain no 'Z' overload: reject the inconsistency. */
   if (
        !function->parameter_sequence.empty() &&
        function->allow_zero_parameters () &&
        !tc .allow_zero_parameters ()
      )
   {
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR119 - Mismatch in zero parameter condition for generic function: " + function_name,
                    exprtk_error_location));

      return error_node();
   }

   if (token_is(token_t::e_lbracket))
   {
      if (token_is(token_t::e_rbracket))
      {
         if (
              !function->allow_zero_parameters() &&
              !tc .allow_zero_parameters()
            )
         {
            set_error(
               make_error(parser_error::e_syntax,
                          current_token(),
                          "ERR120 - Zero parameter call to generic function: " + function_name + " not allowed",
                          exprtk_error_location));

            return error_node();
         }
      }
      else
      {
         for ( ; ; )
         {
            expression_node_ptr arg = parse_expression();

            if (0 == arg)
               return error_node();

            /* Record the argument's category for the type signature. */
            if (is_ivector_node(arg))
               param_type_list += 'V';
            else if (is_generally_string_node(arg))
               param_type_list += 'S';
            else /* Everything else is assumed to be a scalar returning expression */
               param_type_list += 'T';

            arg_list.push_back(arg);

            if (token_is(token_t::e_rbracket))
               break;
            else if (!token_is(token_t::e_comma))
            {
               set_error(
                  make_error(parser_error::e_syntax,
                             current_token(),
                             "ERR121 - Expected ',' for call to generic function: " + function_name,
                             exprtk_error_location));

               return error_node();
            }
         }
      }
   }
   else if (
             !function->parameter_sequence.empty() &&
             function->allow_zero_parameters () &&
             !tc .allow_zero_parameters ()
           )
   {
      /* No '(' at all counts as a zero-parameter invocation. */
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR122 - Zero parameter call to generic function: " + function_name + " not allowed",
                    exprtk_error_location));

      return error_node();
   }

   std::size_t param_seq_index = 0;

   if (
        state_.type_check_enabled &&
        !tc.verify(param_type_list, param_seq_index)
      )
   {
      /* NOTE(review): message text mirrors the ERR121 comma error,
         although this branch reports a failed parameter type check —
         presumably a copy-paste artefact; confirm against upstream. */
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR123 - Expected ',' for call to generic function: " + function_name,
                    exprtk_error_location));

      return error_node();
   }

   expression_node_ptr result = error_node();

   if (tc.paramseq_count() <= 1)
      result = expression_generator_
                  .generic_function_call(function, arg_list);
   else
      result = expression_generator_
                  .generic_function_call(function, arg_list, param_seq_index);

   sdd.delete_ptr = (0 == result);

   return result;
}

#ifndef exprtk_disable_string_capabilities
/* Parse a call to a string-returning generic function. Same argument
   collection and type-signature check as the generic variant (always
   verified, regardless of state_.type_check_enabled). Diagnostics
   ERR124/ERR125. */
inline expression_node_ptr parse_string_function_call(igeneric_function<T>* function, const std::string& function_name)
{
   std::vector<expression_node_ptr> arg_list;

   scoped_vec_delete<expression_node_t> sdd((*this),arg_list);

   next_token();

   std::string param_type_list;

   type_checker tc((*this), function_name, function->parameter_sequence);

   if (
        (!function->parameter_sequence.empty()) &&
        (0 == tc.paramseq_count())
      )
   {
      return error_node();
   }

   if (token_is(token_t::e_lbracket))
   {
      if (!token_is(token_t::e_rbracket))
      {
         for ( ; ; )
         {
            expression_node_ptr arg = parse_expression();

            if (0 == arg)
               return error_node();

            /* Record the argument's category for the type signature. */
            if (is_ivector_node(arg))
               param_type_list += 'V';
            else if (is_generally_string_node(arg))
               param_type_list += 'S';
            else /* Everything else is a scalar returning expression */
               param_type_list += 'T';

            arg_list.push_back(arg);

            if (token_is(token_t::e_rbracket))
               break;
            else if (!token_is(token_t::e_comma))
            {
               set_error(
                  make_error(parser_error::e_syntax,
                             current_token(),
                             "ERR124 - Expected ',' for call to string function: " + function_name,
                             exprtk_error_location));

               return error_node();
            }
         }
      }
   }

   std::size_t param_seq_index = 0;

   if (!tc.verify(param_type_list, param_seq_index))
   {
      /* NOTE(review): message text mirrors the ERR124 comma error,
         although this branch reports a failed parameter type check —
         confirm against upstream. */
      set_error(
         make_error(parser_error::e_syntax,
                    current_token(),
                    "ERR125 - Expected ',' for call to string function: " + function_name,
                    exprtk_error_location));

      return error_node();
   }

   expression_node_ptr result = error_node();

   if (tc.paramseq_count() <= 1)
      result = expression_generator_
                  .string_function_call(function, arg_list);
   else
      result = expression_generator_
                  .string_function_call(function, arg_list, param_seq_index);

   sdd.delete_ptr = (0 == result);

   return result;
}
#endif

/* Parse the '(p0,p1,...)' argument list of a special function $fDD
   with a compile-time fixed parameter count, then generate the
   corresponding special-function node. sd frees the collected branches
   on any failure path. Diagnostics ERR126..ERR128. */
template <typename Type, std::size_t NumberOfParameters>
struct parse_special_function_impl
{
   static inline expression_node_ptr process(parser<Type>& p,const details::operator_type opt_type, const std::string& sf_name)
   {
      expression_node_ptr branch[NumberOfParameters];
      expression_node_ptr result = error_node();

      std::fill_n(branch,NumberOfParameters,reinterpret_cast<expression_node_ptr>(0));

      scoped_delete<expression_node_t,NumberOfParameters> sd(p,branch);

      p.next_token();

      if (!p.token_is(token_t::e_lbracket))
      {
         p.set_error(
            make_error(parser_error::e_syntax,
                       p.current_token(),
                       "ERR126 - Expected '(' for special function '" + sf_name + "'",
                       exprtk_error_location));

         return error_node();
      }

      for (std::size_t i = 0; i < NumberOfParameters; ++i)
      {
         branch[i] = p.parse_expression();

         if (0 == branch[i])
         {
            return p.error_node();
         }
         else if (i < (NumberOfParameters - 1))
         {
            if (!p.token_is(token_t::e_comma))
            {
               p.set_error(
                  make_error(parser_error::e_syntax,
                             p.current_token(),
                             "ERR127 - Expected ',' before next parameter of special function '" + sf_name + "'",
                             exprtk_error_location));

               return p.error_node();
            }
         }
      }

      if (!p.token_is(token_t::e_rbracket))
      {
         p.set_error(
            make_error(parser_error::e_syntax,
                       p.current_token(),
                       "ERR128 - Invalid number of parameters for special function '" + sf_name + "'",
                       exprtk_error_location));

         return p.error_node();
      }
      else
         result = p.expression_generator_.special_function(opt_type,branch);

      sd.delete_ptr = (0 == result);

      return result;
   }
};

/* Parse a '$fDD(...)' special function reference: decode the two-digit
   id from the token, validate it, and dispatch to the 3- or 4-parameter
   implementation depending on where the id falls relative to e_sf48.
   Diagnostics ERR129/ERR130. */
inline expression_node_ptr parse_special_function()
{
   const std::string sf_name = current_token().value;

   /* Expect: $fDD(expr0,expr1,expr2) or $fDD(expr0,expr1,expr2,expr3) */
   if (
        !details::is_digit(sf_name[2]) ||
        !details::is_digit(sf_name[3])
      )
   {
      set_error(
         make_error(parser_error::e_token,
                    current_token(),
                    "ERR129 - Invalid special function[1]: " + sf_name,
                    exprtk_error_location));

      return error_node();
   }

   const int id = (sf_name[2] - '0') * 10 + (sf_name[3] - '0');

   if (id >= details::e_sffinal)
   {
      set_error(
         make_error(parser_error::e_token,
                    current_token(),
                    "ERR130 - Invalid special function[2]: " + sf_name,
                    exprtk_error_location));

      return error_node();
   }

   const int sf_3_to_4 = details::e_sf48;
   const details::operator_type opt_type = details::operator_type(id + 1000);
   const std::size_t NumberOfParameters = (id < (sf_3_to_4 - 1000)) ?
3U : 4U;

      switch (NumberOfParameters)
      {
         case 3 : return parse_special_function_impl<T,3>::process((*this), opt_type, sf_name);
         case 4 : return parse_special_function_impl<T,4>::process((*this), opt_type, sf_name);
         default : return error_node();
      }
   }

   // Parses the 'null' statement: consumes the token and yields a no-op node.
   inline expression_node_ptr parse_null_statement()
   {
      next_token();
      return node_allocator_.allocate<details::null_node<T> >();
   }

   #ifndef exprtk_disable_break_continue
   // Parses a 'break' statement, optionally carrying a return expression of
   // the form break[expr]. Only permitted inside the scope of a loop.
   inline expression_node_ptr parse_break_statement()
   {
      if (state_.parsing_break_stmt)
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR131 - Break call within a break call is not allowed",
            exprtk_error_location));

         return error_node();
      }

      // Flag break-statement parsing for the duration of this call; the
      // scoped negator restores the flag on exit.
      scoped_bool_negator sbn(state_.parsing_break_stmt);

      // brkcnt_list_ is non-empty only while a loop body is being parsed.
      if (!brkcnt_list_.empty())
      {
         next_token();

         // Mark the innermost enclosing loop as containing a break/continue.
         brkcnt_list_.front() = true;

         expression_node_ptr return_expr = error_node();

         // Optional break return expression: break[expr]
         if (token_is(token_t::e_lsqrbracket))
         {
            if (0 == (return_expr = parse_expression()))
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR132 - Failed to parse return expression for 'break' statement",
                  exprtk_error_location));

               return error_node();
            }
            else if (!token_is(token_t::e_rsqrbracket))
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR133 - Expected ']' at the completion of break's return expression",
                  exprtk_error_location));

               free_node(node_allocator_,return_expr);

               return error_node();
            }
         }

         state_.activate_side_effect("parse_break_statement()");

         return node_allocator_.allocate<details::break_node<T> >(return_expr);
      }
      else
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR134 - Invalid use of 'break', allowed only in the scope of a loop",
            exprtk_error_location));
      }

      return error_node();
   }

   // Parses a 'continue' statement. Only permitted inside the scope of a loop.
   inline expression_node_ptr parse_continue_statement()
   {
      if (!brkcnt_list_.empty())
      {
         next_token();
         brkcnt_list_.front() = true;
         state_.activate_side_effect("parse_continue_statement()");
         return node_allocator_.allocate<details::continue_node<T> >();
      }
      else
      {
         set_error(
make_error(parser_error::e_syntax, current_token(),
            "ERR135 - Invalid use of 'continue', allowed only in the scope of a loop",
            exprtk_error_location));

         return error_node();
      }
   }
   #endif

   // Parses a local vector definition of the form:
   //   var v[size]           (default initialised)
   //   var v[size] := [x]    (single value initialiser)
   //   var v[size] := {a,b}  (initialiser list)
   //   var v[size] := w      (vector-to-vector initialisation)
   //   var v[size] := null   (null initialisation)
   // The size expression must evaluate to a positive integral constant.
   inline expression_node_ptr parse_define_vector_statement(const std::string& vec_name)
   {
      expression_node_ptr size_expr = error_node();

      if (!token_is(token_t::e_lsqrbracket))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR136 - Expected '[' as part of vector size definition",
            exprtk_error_location));

         return error_node();
      }
      else if (0 == (size_expr = parse_expression()))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR137 - Failed to determine size of vector '" + vec_name + "'",
            exprtk_error_location));

         return error_node();
      }
      else if (!is_constant_node(size_expr))
      {
         // The size must be a compile-time constant (literal), not a
         // runtime expression.
         free_node(node_allocator_,size_expr);

         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR138 - Expected a literal number as size of vector '" + vec_name + "'",
            exprtk_error_location));

         return error_node();
      }

      T vector_size = size_expr->value();

      free_node(node_allocator_,size_expr);

      // Reject non-positive sizes and sizes with a fractional component.
      if (
           (vector_size <= T(0)) ||
           std::not_equal_to<T>()
           (T(0),vector_size - details::numeric::trunc(vector_size))
         )
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR139 - Invalid vector size. Must be an integer greater than zero, size: " +
            details::to_str(details::numeric::to_int32(vector_size)),
            exprtk_error_location));

         return error_node();
      }

      std::vector<expression_node_ptr> vec_initilizer_list;

      // Scoped deleter frees collected initialiser nodes on failure.
      scoped_vec_delete<expression_node_t> svd((*this),vec_initilizer_list);

      bool single_value_initialiser = false;
      bool vec_to_vec_initialiser   = false;
      bool null_initialisation      = false;

      if (!token_is(token_t::e_rsqrbracket))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR140 - Expected ']' as part of vector size definition",
            exprtk_error_location));

         return error_node();
      }
      else if (!token_is(token_t::e_eof))
      {
         if (!token_is(token_t::e_assign))
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR141 - Expected ':=' as part of vector definition",
               exprtk_error_location));

            return error_node();
         }
         else if (token_is(token_t::e_lsqrbracket))
         {
            // Single value initialiser: var v[size] := [expr]
            expression_node_ptr initialiser = parse_expression();

            if (0 == initialiser)
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR142 - Failed to parse single vector initialiser",
                  exprtk_error_location));

               return error_node();
            }

            vec_initilizer_list.push_back(initialiser);

            if (!token_is(token_t::e_rsqrbracket))
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR143 - Expected ']' to close single value vector initialiser",
                  exprtk_error_location));

               return error_node();
            }

            single_value_initialiser = true;
         }
         else if (!token_is(token_t::e_lcrlbracket))
         {
            expression_node_ptr initialiser = error_node();

            // Is this a vector to vector assignment and initialisation?
            if (token_t::e_symbol == current_token().type)
            {
               // Is it a locally defined vector?
               scope_element& se = sem_.get_active_element(current_token().value);

               if (scope_element::e_vector == se.type)
               {
                  if (0 != (initialiser = parse_expression()))
                     vec_initilizer_list.push_back(initialiser);
                  else
                     return error_node();
               }
               // Are we dealing with a user defined vector?
else if (symtab_store_.is_vector(current_token().value))
               {
                  lodge_symbol(current_token().value,e_st_vector);

                  if (0 != (initialiser = parse_expression()))
                     vec_initilizer_list.push_back(initialiser);
                  else
                     return error_node();
               }
               // Are we dealing with a null initialisation vector definition?
               else if (token_is(token_t::e_symbol,"null"))
                  null_initialisation = true;
            }

            if (!null_initialisation)
            {
               if (0 == initialiser)
               {
                  set_error(make_error(parser_error::e_syntax, current_token(),
                     "ERR144 - Expected '{' as part of vector initialiser list",
                     exprtk_error_location));

                  return error_node();
               }
               else
                  vec_to_vec_initialiser = true;
            }
         }
         else if (!token_is(token_t::e_rcrlbracket))
         {
            // Initialiser list: var v[size] := {expr0, expr1, ...}
            for ( ; ; )
            {
               expression_node_ptr initialiser = parse_expression();

               if (0 == initialiser)
               {
                  set_error(make_error(parser_error::e_syntax, current_token(),
                     "ERR145 - Expected '{' as part of vector initialiser list",
                     exprtk_error_location));

                  return error_node();
               }
               else
                  vec_initilizer_list.push_back(initialiser);

               if (token_is(token_t::e_rcrlbracket))
                  break;

               bool is_next_close = peek_token_is(token_t::e_rcrlbracket);

               if (!token_is(token_t::e_comma) && is_next_close)
               {
                  set_error(make_error(parser_error::e_syntax, current_token(),
                     "ERR146 - Expected ',' between vector initialisers",
                     exprtk_error_location));

                  return error_node();
               }

               if (token_is(token_t::e_rcrlbracket))
                  break;
            }
         }

         // A vector definition must be terminated: either by a closing
         // bracket of an enclosing construct (held, not consumed) or by ';'.
         if (
              !token_is(token_t::e_rbracket   ,prsrhlpr_t::e_hold) &&
              !token_is(token_t::e_rcrlbracket,prsrhlpr_t::e_hold) &&
              !token_is(token_t::e_rsqrbracket,prsrhlpr_t::e_hold)
            )
         {
            if (!token_is(token_t::e_eof))
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR147 - Expected ';' at end of vector definition",
                  exprtk_error_location));

               return error_node();
            }
         }

         if (vec_initilizer_list.size() > vector_size)
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR148 - Initialiser list larger than the number of elements in the vector: '" + vec_name + "'",
               exprtk_error_location));

            return error_node();
         }
      }

      typename symbol_table_t::vector_holder_ptr vec_holder = typename symbol_table_t::vector_holder_ptr(0);

      const std::size_t vec_size = static_cast<std::size_t>(details::numeric::to_int32(vector_size));

      scope_element& se = sem_.get_element(vec_name);

      // Reuse an inactive scope element of matching size/type if one exists;
      // an active element of the same name is an illegal redefinition.
      if (se.name == vec_name)
      {
         if (se.active)
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR149 - Illegal redefinition of local vector: '" + vec_name + "'",
               exprtk_error_location));

            return error_node();
         }
         else if (
                   (se.size == vec_size) &&
                   (scope_element::e_vector == se.type)
                 )
         {
            vec_holder = se.vec_node;
            se.active  = true;
            se.depth   = state_.scope_depth;
            se.ref_count++;
         }
      }

      // No reusable element: create a new scope element backed by freshly
      // allocated storage and register it with the scope element manager.
      if (0 == vec_holder)
      {
         scope_element nse;
         nse.name      = vec_name;
         nse.active    = true;
         nse.ref_count = 1;
         nse.type      = scope_element::e_vector;
         nse.depth     = state_.scope_depth;
         nse.size      = vec_size;
         nse.data      = new T[vec_size];
         nse.vec_node  = new typename scope_element::vector_holder_t((T*)(nse.data),nse.size);

         if (!sem_.add_element(nse))
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR150 - Failed to add new local vector '" + vec_name + "' to SEM",
               exprtk_error_location));

            sem_.free_element(nse);

            return error_node();
         }

         vec_holder = nse.vec_node;

         exprtk_debug(("parse_define_vector_statement() - INFO - Added new local vector: %s[%d]\n",
                       nse.name.c_str(),
                       static_cast<int>(nse.size)));
      }

      state_.activate_side_effect("parse_define_vector_statement()");

      lodge_symbol(vec_name,e_st_local_vector);

      expression_node_ptr result = error_node();

      if (null_initialisation)
         result = expression_generator_(T(0.0));
      else if (vec_to_vec_initialiser)
         result = expression_generator_(
                     details::e_assign,
                     node_allocator_.allocate<vector_node_t>(vec_holder),
                     vec_initilizer_list[0]);
      else
         result = node_allocator_
                     .allocate<details::vector_assignment_node<T> >(
                        (*vec_holder)[0],
                        vec_size,
                        vec_initilizer_list,
                        single_value_initialiser);

      // The result node takes ownership of the initialiser list on success.
      svd.delete_ptr = (0 == result);

      return result;
   }

   #ifndef exprtk_disable_string_capabilities
   inline expression_node_ptr
parse_define_string_statement(const std::string& str_name, expression_node_ptr initialisation_expression)
   {
      // Defines (or re-activates) a local string variable and generates the
      // assignment of its initialisation expression. On any failure the
      // initialisation expression is freed before returning.
      stringvar_node_t* str_node = reinterpret_cast<stringvar_node_t*>(0);

      scope_element& se = sem_.get_element(str_name);

      // Reuse an inactive string scope element of the same name if present;
      // an active one is an illegal redefinition.
      if (se.name == str_name)
      {
         if (se.active)
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR151 - Illegal redefinition of local variable: '" + str_name + "'",
               exprtk_error_location));

            free_node(node_allocator_,initialisation_expression);

            return error_node();
         }
         else if (scope_element::e_string == se.type)
         {
            str_node  = se.str_node;
            se.active = true;
            se.depth  = state_.scope_depth;
            se.ref_count++;
         }
      }

      // Otherwise create a new scope element backed by a new std::string.
      if (0 == str_node)
      {
         scope_element nse;
         nse.name      = str_name;
         nse.active    = true;
         nse.ref_count = 1;
         nse.type      = scope_element::e_string;
         nse.depth     = state_.scope_depth;
         nse.data      = new std::string;
         nse.str_node  = new stringvar_node_t(*(std::string*)(nse.data));

         if (!sem_.add_element(nse))
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR152 - Failed to add new local string variable '" + str_name + "' to SEM",
               exprtk_error_location));

            free_node(node_allocator_,initialisation_expression);

            sem_.free_element(nse);

            return error_node();
         }

         str_node = nse.str_node;

         exprtk_debug(("parse_define_string_statement() - INFO - Added new local string variable: %s\n",nse.name.c_str()));
      }

      lodge_symbol(str_name,e_st_local_string);

      state_.activate_side_effect("parse_define_string_statement()");

      expression_node_ptr branch[2] = {0};

      branch[0] = str_node;
      branch[1] = initialisation_expression;

      return expression_generator_(details::e_assign,branch);
   }
   #else
   // String capabilities disabled: string definitions always fail.
   inline expression_node_ptr parse_define_string_statement(const std::string&, expression_node_ptr)
   {
      return error_node();
   }
   #endif

   // Returns true when 'symbol' is already defined as an active local
   // variable in the current scope (i.e. a new definition would shadow it).
   inline bool local_variable_is_shadowed(const std::string& symbol)
   {
      const scope_element& se = sem_.get_element(symbol);
      return (se.name == symbol) && se.active;
   }

   // Parses a 'var' definition statement. Depending on what follows the
   // symbol this dispatches to vector definition (var v[...]), uninitialised
   // definition (var v{}), or scalar/string definition with optional ':='
   // initialiser.
   inline expression_node_ptr parse_define_var_statement()
   {
      if (settings_.vardef_disabled())
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR153 - Illegal variable definition",
            exprtk_error_location));

         return error_node();
      }
      else if (!details::imatch(current_token().value,"var"))
      {
         return error_node();
      }
      else
         next_token();

      const std::string var_name = current_token().value;

      expression_node_ptr initialisation_expression = error_node();

      if (!token_is(token_t::e_symbol))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR154 - Expected a symbol for variable definition",
            exprtk_error_location));

         return error_node();
      }
      else if (details::is_reserved_symbol(var_name))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR155 - Illegal redefinition of reserved keyword: '" + var_name + "'",
            exprtk_error_location));

         return error_node();
      }
      else if (symtab_store_.symbol_exists(var_name))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR156 - Illegal redefinition of variable '" + var_name + "'",
            exprtk_error_location));

         return error_node();
      }
      else if (local_variable_is_shadowed(var_name))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR157 - Illegal redefinition of local variable: '" + var_name + "'",
            exprtk_error_location));

         return error_node();
      }
      else if (token_is(token_t::e_lsqrbracket,prsrhlpr_t::e_hold))
      {
         // var name[...] -> vector definition
         return parse_define_vector_statement(var_name);
      }
      else if (token_is(token_t::e_lcrlbracket,prsrhlpr_t::e_hold))
      {
         // var name{} -> uninitialised variable definition
         return parse_uninitialised_var_statement(var_name);
      }
      else if (token_is(token_t::e_assign))
      {
         if (0 == (initialisation_expression = parse_expression()))
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR158 - Failed to parse initialisation expression",
               exprtk_error_location));

            return error_node();
         }
      }

      // The definition must be terminated by ';' or a closing bracket of an
      // enclosing construct (held, not consumed).
      if (
           !token_is(token_t::e_rbracket   ,prsrhlpr_t::e_hold) &&
           !token_is(token_t::e_rcrlbracket,prsrhlpr_t::e_hold) &&
           !token_is(token_t::e_rsqrbracket,prsrhlpr_t::e_hold)
         )
      {
         if (!token_is(token_t::e_eof,prsrhlpr_t::e_hold))
         {
            set_error(
make_error(parser_error::e_syntax, current_token(),
               "ERR159 - Expected ';' after variable definition",
               exprtk_error_location));

            free_node(node_allocator_,initialisation_expression);

            return error_node();
         }
      }

      // A string-valued initialiser turns this into a string definition.
      if (
           (0 != initialisation_expression) &&
           details::is_generally_string_node(initialisation_expression)
         )
      {
         return parse_define_string_statement(var_name,initialisation_expression);
      }

      expression_node_ptr var_node = reinterpret_cast<expression_node_ptr>(0);

      scope_element& se = sem_.get_element(var_name);

      // Reuse an inactive variable scope element of the same name if present;
      // an active one is an illegal redefinition.
      if (se.name == var_name)
      {
         if (se.active)
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR160 - Illegal redefinition of local variable: '" + var_name + "'",
               exprtk_error_location));

            free_node(node_allocator_, initialisation_expression);

            return error_node();
         }
         else if (scope_element::e_variable == se.type)
         {
            var_node  = se.var_node;
            se.active = true;
            se.depth  = state_.scope_depth;
            se.ref_count++;
         }
      }

      // Otherwise create a new scope element backed by a new T value.
      if (0 == var_node)
      {
         scope_element nse;
         nse.name      = var_name;
         nse.active    = true;
         nse.ref_count = 1;
         nse.type      = scope_element::e_variable;
         nse.depth     = state_.scope_depth;
         nse.data      = new T(T(0));
         nse.var_node  = node_allocator_.allocate<variable_node_t>(*(T*)(nse.data));

         if (!sem_.add_element(nse))
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR161 - Failed to add new local variable '" + var_name + "' to SEM",
               exprtk_error_location));

            free_node(node_allocator_, initialisation_expression);

            sem_.free_element(nse);

            return error_node();
         }

         var_node = nse.var_node;

         exprtk_debug(("parse_define_var_statement() - INFO - Added new local variable: %s\n",nse.name.c_str()));
      }

      state_.activate_side_effect("parse_define_var_statement()");

      lodge_symbol(var_name,e_st_local_variable);

      expression_node_ptr branch[2] = {0};

      branch[0] = var_node;
      // Default-initialise to zero when no explicit initialiser was given.
      branch[1] = initialisation_expression ? initialisation_expression : expression_generator_(T(0));

      return expression_generator_(details::e_assign,branch);
   }

   // Parses an uninitialised variable definition of the form: var name{}
   // The variable is registered with the scope element manager but no
   // assignment node is generated; the statement itself evaluates to zero.
   inline expression_node_ptr parse_uninitialised_var_statement(const std::string& var_name)
   {
      if (
           !token_is(token_t::e_lcrlbracket) ||
           !token_is(token_t::e_rcrlbracket)
         )
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR162 - Expected a '{}' for uninitialised var definition",
            exprtk_error_location));

         return error_node();
      }
      else if (!token_is(token_t::e_eof,prsrhlpr_t::e_hold))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR163 - Expected ';' after uninitialised variable definition",
            exprtk_error_location));

         return error_node();
      }

      expression_node_ptr var_node = reinterpret_cast<expression_node_ptr>(0);

      scope_element& se = sem_.get_element(var_name);

      if (se.name == var_name)
      {
         if (se.active)
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR164 - Illegal redefinition of local variable: '" + var_name + "'",
               exprtk_error_location));

            return error_node();
         }
         else if (scope_element::e_variable == se.type)
         {
            var_node  = se.var_node;
            se.active = true;
            se.ref_count++;
         }
      }

      if (0 == var_node)
      {
         scope_element nse;
         nse.name      = var_name;
         nse.active    = true;
         nse.ref_count = 1;
         nse.type      = scope_element::e_variable;
         nse.depth     = state_.scope_depth;
         nse.ip_index  = sem_.next_ip_index();
         nse.data      = new T(T(0));
         nse.var_node  = node_allocator_.allocate<variable_node_t>(*(T*)(nse.data));

         if (!sem_.add_element(nse))
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR165 - Failed to add new local variable '" + var_name + "' to SEM",
               exprtk_error_location));

            sem_.free_element(nse);

            return error_node();
         }

         exprtk_debug(("parse_uninitialised_var_statement() - INFO - Added new local variable: %s\n",
                       nse.name.c_str()));
      }

      lodge_symbol(var_name,e_st_local_variable);

      state_.activate_side_effect("parse_uninitialised_var_statement()");

      return expression_generator_(T(0));
   }

   inline expression_node_ptr
parse_swap_statement()
   {
      // Parses swap(a,b) where each parameter is a variable or a vector
      // element. Nodes generated for vector-element parameters are owned
      // locally and must be freed on every error path (tracked by the
      // *_generated flags).
      if (!details::imatch(current_token().value,"swap"))
      {
         return error_node();
      }
      else
         next_token();

      if (!token_is(token_t::e_lbracket))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR166 - Expected '(' at start of swap statement",
            exprtk_error_location));

         return error_node();
      }

      expression_node_ptr variable0 = error_node();
      expression_node_ptr variable1 = error_node();

      bool variable0_generated = false;
      bool variable1_generated = false;

      const std::string var0_name = current_token().value;

      if (!token_is(token_t::e_symbol,prsrhlpr_t::e_hold))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR167 - Expected a symbol for variable or vector element definition",
            exprtk_error_location));

         return error_node();
      }
      else if (peek_token_is(token_t::e_lsqrbracket))
      {
         // First parameter is a vector element: name[index]
         if (0 == (variable0 = parse_vector()))
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR168 - First parameter to swap is an invalid vector element: '" + var0_name + "'",
               exprtk_error_location));

            return error_node();
         }

         variable0_generated = true;
      }
      else
      {
         // First parameter is a plain variable: check the symbol table, then
         // allow a local (scope element) variable to take precedence.
         if (symtab_store_.is_variable(var0_name))
         {
            variable0 = symtab_store_.get_variable(var0_name);
         }

         scope_element& se = sem_.get_element(var0_name);

         if (
              (se.active)            &&
              (se.name == var0_name) &&
              (scope_element::e_variable == se.type)
            )
         {
            variable0 = se.var_node;
         }

         lodge_symbol(var0_name,e_st_variable);

         if (0 == variable0)
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR169 - First parameter to swap is an invalid variable: '" + var0_name + "'",
               exprtk_error_location));

            return error_node();
         }
         else
            next_token();
      }

      if (!token_is(token_t::e_comma))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR170 - Expected ',' between parameters to swap",
            exprtk_error_location));

         if (variable0_generated)
         {
            free_node(node_allocator_,variable0);
         }

         return error_node();
      }

      const std::string var1_name = current_token().value;

      if (!token_is(token_t::e_symbol,prsrhlpr_t::e_hold))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR171 - Expected a symbol for variable or vector element definition",
            exprtk_error_location));

         if (variable0_generated)
         {
            free_node(node_allocator_,variable0);
         }

         return error_node();
      }
      else if (peek_token_is(token_t::e_lsqrbracket))
      {
         // Second parameter is a vector element: name[index]
         if (0 == (variable1 = parse_vector()))
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR172 - Second parameter to swap is an invalid vector element: '" + var1_name + "'",
               exprtk_error_location));

            if (variable0_generated)
            {
               free_node(node_allocator_,variable0);
            }

            return error_node();
         }

         variable1_generated = true;
      }
      else
      {
         if (symtab_store_.is_variable(var1_name))
         {
            variable1 = symtab_store_.get_variable(var1_name);
         }

         scope_element& se = sem_.get_element(var1_name);

         if (
              (se.active)            &&
              (se.name == var1_name) &&
              (scope_element::e_variable == se.type)
            )
         {
            variable1 = se.var_node;
         }

         lodge_symbol(var1_name,e_st_variable);

         if (0 == variable1)
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR173 - Second parameter to swap is an invalid variable: '" + var1_name + "'",
               exprtk_error_location));

            if (variable0_generated)
            {
               free_node(node_allocator_,variable0);
            }

            return error_node();
         }
         else
            next_token();
      }

      if (!token_is(token_t::e_rbracket))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR174 - Expected ')' at end of swap statement",
            exprtk_error_location));

         if (variable0_generated)
         {
            free_node(node_allocator_,variable0);
         }

         if (variable1_generated)
         {
            free_node(node_allocator_,variable1);
         }

         return error_node();
      }

      typedef details::variable_node<T>* variable_node_ptr;

      variable_node_ptr v0 = variable_node_ptr(0);
      variable_node_ptr v1 = variable_node_ptr(0);

      expression_node_ptr result = error_node();

      // When both parameters are plain variable nodes a specialised swap
      // node is used; otherwise fall back to the generic swap node.
      if (
           (0 != (v0 = dynamic_cast<variable_node_ptr>(variable0))) &&
           (0 != (v1 = dynamic_cast<variable_node_ptr>(variable1)))
         )
      {
         result = node_allocator_.allocate<details::swap_node<T>
>(v0, v1);

         // The specialised swap node references the underlying variables, so
         // any locally generated vector-element nodes can be released.
         if (variable0_generated)
         {
            free_node(node_allocator_,variable0);
         }

         if (variable1_generated)
         {
            free_node(node_allocator_,variable1);
         }
      }
      else
         result = node_allocator_.allocate<details::swap_generic_node<T> >
                     (variable0, variable1);

      state_.activate_side_effect("parse_swap_statement()");

      return result;
   }

   #ifndef exprtk_disable_return_statement
   // Parses a 'return' statement of the form return [expr0, expr1, ...].
   // Records the parameter type signature of the returned values in the
   // dependent entity collector.
   inline expression_node_ptr parse_return_statement()
   {
      if (state_.parsing_return_stmt)
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR175 - Return call within a return call is not allowed",
            exprtk_error_location));

         return error_node();
      }

      // Flag return-statement parsing; restored automatically on exit.
      scoped_bool_negator sbn(state_.parsing_return_stmt);

      std::vector<expression_node_ptr> arg_list;

      // Scoped deleter frees collected return values on failure.
      scoped_vec_delete<expression_node_t> sdd((*this),arg_list);

      if (!details::imatch(current_token().value,"return"))
      {
         return error_node();
      }
      else
         next_token();

      if (!token_is(token_t::e_lsqrbracket))
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR176 - Expected '[' at start of return statement",
            exprtk_error_location));

         return error_node();
      }
      else if (!token_is(token_t::e_rsqrbracket))
      {
         // Parse the comma separated list of return values.
         for ( ; ; )
         {
            expression_node_ptr arg = parse_expression();

            if (0 == arg)
               return error_node();

            arg_list.push_back(arg);

            if (token_is(token_t::e_rsqrbracket))
               break;
            else if (!token_is(token_t::e_comma))
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR177 - Expected ',' between values during call to return",
                  exprtk_error_location));

               return error_node();
            }
         }
      }
      else if (settings_.zero_return_disabled())
      {
         set_error(make_error(parser_error::e_syntax, current_token(),
            "ERR178 - Zero parameter return statement not allowed",
            exprtk_error_location));

         return error_node();
      }

      lexer::token prev_token = current_token();

      // A stray extra ']' after a non-empty return list is invalid.
      if (token_is(token_t::e_rsqrbracket))
      {
         if (!arg_list.empty())
         {
            set_error(make_error(parser_error::e_syntax, prev_token,
               "ERR179 - Invalid ']' found during return call",
               exprtk_error_location));

            return error_node();
         }
      }

      // Build the return type signature: V = vector, S = string, T = scalar.
      std::string ret_param_type_list;

      for (std::size_t i = 0; i < arg_list.size(); ++i)
      {
         if (0 == arg_list[i])
            return error_node();
         else if (is_ivector_node(arg_list[i]))
            ret_param_type_list += 'V';
         else if (is_generally_string_node(arg_list[i]))
            ret_param_type_list += 'S';
         else
            ret_param_type_list += 'T';
      }

      dec_.retparam_list_.push_back(ret_param_type_list);

      expression_node_ptr result = expression_generator_.return_call(arg_list);

      // On success the return node owns the argument nodes.
      sdd.delete_ptr = (0 == result);

      state_.return_stmt_present = true;

      state_.activate_side_effect("parse_return_statement()");

      return result;
   }
   #else
   // Return statement support disabled.
   inline expression_node_ptr parse_return_statement()
   {
      return error_node();
   }
   #endif

   // Handles implied multiplication after a variable, e.g. x(y) -> x * (y),
   // by inserting a '*' token when commutative checking is enabled; raises
   // an error otherwise.
   inline bool post_variable_process(const std::string& symbol)
   {
      if (
           peek_token_is(token_t::e_lbracket   ) ||
           peek_token_is(token_t::e_lcrlbracket) ||
           peek_token_is(token_t::e_lsqrbracket)
         )
      {
         if (!settings_.commutative_check_enabled())
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR180 - Invalid sequence of variable '"+ symbol + "' and bracket",
               exprtk_error_location));

            return false;
         }

         lexer().insert_front(token_t::e_mul);
      }

      return true;
   }

   // Handles implied multiplication between adjacent bracketed expressions,
   // e.g. (x)(y) -> (x) * (y). String nodes are exempt.
   inline bool post_bracket_process(const typename token_t::token_type& token, expression_node_ptr& branch)
   {
      bool implied_mul = false;

      if (is_generally_string_node(branch))
         return true;

      const lexer::parser_helper::token_advance_mode hold = prsrhlpr_t::e_hold;

      // A following open bracket of any kind after a closed bracketed
      // expression implies multiplication.
      switch (token)
      {
         case token_t::e_lcrlbracket : implied_mul = token_is(token_t::e_lbracket   ,hold) ||
                                                     token_is(token_t::e_lcrlbracket,hold) ||
                                                     token_is(token_t::e_lsqrbracket,hold) ;
                                       break;

         case token_t::e_lbracket    : implied_mul = token_is(token_t::e_lbracket   ,hold) ||
                                                     token_is(token_t::e_lcrlbracket,hold) ||
                                                     token_is(token_t::e_lsqrbracket,hold) ;
                                       break;

         case token_t::e_lsqrbracket : implied_mul = token_is(token_t::e_lbracket   ,hold) ||
                                                     token_is(token_t::e_lcrlbracket,hold) ||
                                                     token_is(token_t::e_lsqrbracket,hold) ;
                                       break;

         default                     : return true;
      }

      if (implied_mul)
      {
         if (!settings_.commutative_check_enabled())
         {
set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR181 - Invalid sequence of brackets",
               exprtk_error_location));

            return false;
         }
         else if (token_t::e_eof != current_token().type)
         {
            // Re-insert the open bracket preceded by an explicit '*' token.
            lexer().insert_front(current_token().type);
            lexer().insert_front(token_t::e_mul);
            next_token();
         }
      }

      return true;
   }

   // Resolves the current symbol token against, in order: symbol-table
   // variables/constants, locally defined variables/vectors/strings,
   // functions of the various kinds, vectors, reserved symbols, and finally
   // the unknown symbol resolver (if one is installed).
   inline expression_node_ptr parse_symtab_symbol()
   {
      const std::string symbol = current_token().value;

      // Are we dealing with a variable or a special constant?
      expression_node_ptr variable = symtab_store_.get_variable(symbol);

      if (variable)
      {
         // Constants are folded into literal nodes immediately.
         if (symtab_store_.is_constant_node(symbol))
         {
            variable = expression_generator_(variable->value());
         }

         if (!post_variable_process(symbol))
            return error_node();

         lodge_symbol(symbol,e_st_variable);

         next_token();

         return variable;
      }

      // Are we dealing with a locally defined variable, vector or string?
      if (!sem_.empty())
      {
         scope_element& se = sem_.get_active_element(symbol);

         if (se.active && details::imatch(se.name, symbol))
         {
            if (scope_element::e_variable == se.type)
            {
               se.active = true;
               lodge_symbol(symbol,e_st_local_variable);

               if (!post_variable_process(symbol))
                  return error_node();

               next_token();

               return se.var_node;
            }
            else if (scope_element::e_vector == se.type)
            {
               return parse_vector();
            }
            #ifndef exprtk_disable_string_capabilities
            else if (scope_element::e_string == se.type)
            {
               return parse_string();
            }
            #endif
         }
      }

      #ifndef exprtk_disable_string_capabilities
      // Are we dealing with a string variable?
      if (symtab_store_.is_stringvar(symbol))
      {
         return parse_string();
      }
      #endif

      {
         // Are we dealing with a function?
         ifunction<T>* function = symtab_store_.get_function(symbol);

         if (function)
         {
            lodge_symbol(symbol,e_st_function);

            expression_node_ptr func_node = parse_function_invocation(function,symbol);

            if (func_node)
               return func_node;
            else
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR182 - Failed to generate node for function: '" + symbol + "'",
                  exprtk_error_location));

               return error_node();
            }
         }
      }

      {
         // Are we dealing with a vararg function?
         ivararg_function<T>* vararg_function = symtab_store_.get_vararg_function(symbol);

         if (vararg_function)
         {
            lodge_symbol(symbol,e_st_function);

            expression_node_ptr vararg_func_node = parse_vararg_function_call(vararg_function, symbol);

            if (vararg_func_node)
               return vararg_func_node;
            else
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR183 - Failed to generate node for vararg function: '" + symbol + "'",
                  exprtk_error_location));

               return error_node();
            }
         }
      }

      {
         // Are we dealing with a vararg generic function?
         igeneric_function<T>* generic_function = symtab_store_.get_generic_function(symbol);

         if (generic_function)
         {
            lodge_symbol(symbol,e_st_function);

            expression_node_ptr genericfunc_node = parse_generic_function_call(generic_function, symbol);

            if (genericfunc_node)
               return genericfunc_node;
            else
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR184 - Failed to generate node for generic function: '" + symbol + "'",
                  exprtk_error_location));

               return error_node();
            }
         }
      }

      #ifndef exprtk_disable_string_capabilities
      {
         // Are we dealing with a vararg string returning function?
         igeneric_function<T>* string_function = symtab_store_.get_string_function(symbol);

         if (string_function)
         {
            lodge_symbol(symbol,e_st_function);

            expression_node_ptr stringfunc_node = parse_string_function_call(string_function, symbol);

            if (stringfunc_node)
               return stringfunc_node;
            else
            {
               set_error(make_error(parser_error::e_syntax, current_token(),
                  "ERR185 - Failed to generate node for string function: '" + symbol + "'",
                  exprtk_error_location));

               return error_node();
            }
         }
      }
      #endif

      // Are we dealing with a vector?
      if (symtab_store_.is_vector(symbol))
      {
         lodge_symbol(symbol,e_st_vector);
         return parse_vector();
      }

      if (details::is_reserved_symbol(symbol))
      {
         if (
              settings_.function_enabled(symbol) ||
              !details::is_base_function(symbol)
            )
         {
            set_error(make_error(parser_error::e_syntax, current_token(),
               "ERR186 - Invalid use of reserved symbol '" + symbol + "'",
               exprtk_error_location));

            return error_node();
         }
      }

      // Should we handle unknown symbols?
      if (resolve_unknown_symbol_ && unknown_symbol_resolver_)
      {
         if (!(settings_.rsrvd_sym_usr_disabled() && details::is_reserved_symbol(symbol)))
         {
            symbol_table_t& symtab = symtab_store_.get_symbol_table();

            std::string error_message;

            if (unknown_symbol_resolver::e_usrmode_default == unknown_symbol_resolver_->mode)
            {
               // Default mode: the resolver supplies a value and a kind
               // (variable or constant), which is then registered with the
               // symbol table.
               T default_value = T(0);

               typename unknown_symbol_resolver::usr_symbol_type usr_symbol_type;

               if (unknown_symbol_resolver_->process(symbol, usr_symbol_type, default_value, error_message))
               {
                  bool create_result = false;

                  switch (usr_symbol_type)
                  {
                     case unknown_symbol_resolver::e_usr_variable_type : create_result = symtab.create_variable(symbol, default_value);
                                                                         break;

                     case unknown_symbol_resolver::e_usr_constant_type : create_result = symtab.add_constant(symbol, default_value);
                                                                         break;

                     default                                           : create_result = false;
                  }

                  if (create_result)
                  {
                     expression_node_ptr var = symtab_store_.get_variable(symbol);

                     if (var)
                     {
                        if (symtab_store_.is_constant_node(symbol))
                        {
                           var = expression_generator_(var->value());
                        }

                        lodge_symbol(symbol,e_st_variable);
if (!post_variable_process(symbol))
                           return error_node();

                        next_token();

                        return var;
                     }
                  }
               }

               set_error(make_error(parser_error::e_symtab, current_token(),
                  "ERR187 - Failed to create variable: '" + symbol + "'" +
                  (error_message.empty() ? "" : " - " + error_message),
                  exprtk_error_location));

            }
            else if (unknown_symbol_resolver::e_usrmode_extended == unknown_symbol_resolver_->mode)
            {
               // Extended mode: the resolver may register any entity with
               // the symbol table itself, after which the symbol is
               // re-resolved recursively.
               if (unknown_symbol_resolver_->process(symbol, symtab, error_message))
               {
                  expression_node_ptr result = parse_symtab_symbol();

                  if (result)
                  {
                     return result;
                  }
               }

               set_error(make_error(parser_error::e_symtab, current_token(),
                  "ERR188 - Failed to resolve symbol: '" + symbol + "'" +
                  (error_message.empty() ? "" : " - " + error_message),
                  exprtk_error_location));
            }

            return error_node();
         }
      }

      set_error(make_error(parser_error::e_syntax, current_token(),
         "ERR189 - Undefined symbol: '" + symbol + "'",
         exprtk_error_location));

      return error_node();
   }

   // Dispatches the current symbol token to the appropriate statement or
   // expression parser: vararg/base operations, control structures, special
   // functions, break/continue, var, swap, return, or plain symbol lookup.
   inline expression_node_ptr parse_symbol()
   {
      static const std::string symbol_if       = "if"      ;
      static const std::string symbol_while    = "while"   ;
      static const std::string symbol_repeat   = "repeat"  ;
      static const std::string symbol_for      = "for"     ;
      static const std::string symbol_switch   = "switch"  ;
      static const std::string symbol_null     = "null"    ;
      static const std::string symbol_break    = "break"   ;
      static const std::string symbol_continue = "continue";
      static const std::string symbol_var      = "var"     ;
      static const std::string symbol_swap     = "swap"    ;
      static const std::string symbol_return   = "return"  ;

      if (valid_vararg_operation(current_token().value))
      {
         return parse_vararg_function();
      }
      else if (valid_base_operation(current_token().value))
      {
         return parse_base_operation();
      }
      else if (
                details::imatch(current_token().value, symbol_if) &&
                settings_.control_struct_enabled(current_token().value)
              )
      {
         return parse_conditional_statement();
      }
      else if (
                details::imatch(current_token().value, symbol_while) &&
                settings_.control_struct_enabled(current_token().value)
              )
      {
         return parse_while_loop();
      }
      else if (
                details::imatch(current_token().value, symbol_repeat) &&
                settings_.control_struct_enabled(current_token().value)
              )
      {
         return parse_repeat_until_loop();
      }
      else if (
                details::imatch(current_token().value, symbol_for) &&
                settings_.control_struct_enabled(current_token().value)
              )
      {
         return parse_for_loop();
      }
      else if (
                details::imatch(current_token().value, symbol_switch) &&
                settings_.control_struct_enabled(current_token().value)
              )
      {
         return parse_switch_statement();
      }
      else if (details::is_valid_sf_symbol(current_token().value))
      {
         return parse_special_function();
      }
      else if (details::imatch(current_token().value, symbol_null))
      {
         return parse_null_statement();
      }
      #ifndef exprtk_disable_break_continue
      else if (details::imatch(current_token().value, symbol_break))
      {
         return parse_break_statement();
      }
      else if (details::imatch(current_token().value, symbol_continue))
      {
         return parse_continue_statement();
      }
      #endif
      else if (details::imatch(current_token().value, symbol_var))
      {
         return parse_define_var_statement();
      }
      else if (details::imatch(current_token().value, symbol_swap))
      {
         return parse_swap_statement();
      }
      #ifndef exprtk_disable_return_statement
      else if (
                details::imatch(current_token().value, symbol_return) &&
                settings_.control_struct_enabled(current_token().value)
              )
      {
         return parse_return_statement();
      }
      #endif
      else if (symtab_store_.valid() || !sem_.empty())
      {
         return parse_symtab_symbol();
      }
      else
      {
         set_error(make_error(parser_error::e_symtab, current_token(),
            "ERR190 - Variable or function detected, yet symbol-table is invalid, Symbol: " + current_token().value,
            exprtk_error_location));

         return error_node();
      }
   }

   // Parses a branch (operand) of an expression at the given precedence
   // level. (Definition continues beyond this chunk.)
   inline expression_node_ptr parse_branch(precedence_level precedence = e_level00)
   {
      expression_node_ptr branch = error_node();

      if (token_t::e_number == current_token().type)
      {
         T numeric_value = T(0);

         if (details::string_to_real(current_token().value, numeric_value))
         {
            expression_node_ptr literal_exp = expression_generator_(numeric_value);

            if (0 == literal_exp)
{ set_error( make_error(parser_error::e_numeric, current_token(), "ERR191 - Failed generate node for scalar: '" + current_token().value + "'", exprtk_error_location)); return error_node(); } next_token(); branch = literal_exp; } else { set_error( make_error(parser_error::e_numeric, current_token(), "ERR192 - Failed to convert '" + current_token().value + "' to a number", exprtk_error_location)); return error_node(); } } else if (token_t::e_symbol == current_token().type) { branch = parse_symbol(); } #ifndef exprtk_disable_string_capabilities else if (token_t::e_string == current_token().type) { branch = parse_const_string(); } #endif else if (token_t::e_lbracket == current_token().type) { next_token(); if (0 == (branch = parse_expression())) return error_node(); else if (!token_is(token_t::e_rbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR193 - Expected ')' instead of: '" + current_token().value + "'", exprtk_error_location)); free_node(node_allocator_,branch); return error_node(); } else if (!post_bracket_process(token_t::e_lbracket,branch)) { free_node(node_allocator_,branch); return error_node(); } } else if (token_t::e_lsqrbracket == current_token().type) { next_token(); if (0 == (branch = parse_expression())) return error_node(); else if (!token_is(token_t::e_rsqrbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR194 - Expected ']' instead of: '" + current_token().value + "'", exprtk_error_location)); free_node(node_allocator_,branch); return error_node(); } else if (!post_bracket_process(token_t::e_lsqrbracket,branch)) { free_node(node_allocator_,branch); return error_node(); } } else if (token_t::e_lcrlbracket == current_token().type) { next_token(); if (0 == (branch = parse_expression())) return error_node(); else if (!token_is(token_t::e_rcrlbracket)) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR195 - Expected '}' instead of: '" + current_token().value + "'", 
exprtk_error_location)); free_node(node_allocator_,branch); return error_node(); } else if (!post_bracket_process(token_t::e_lcrlbracket,branch)) { free_node(node_allocator_,branch); return error_node(); } } else if (token_t::e_sub == current_token().type) { next_token(); branch = parse_expression(e_level11); if ( branch && !( details::is_neg_unary_node (branch) && simplify_unary_negation_branch(branch) ) ) { branch = expression_generator_(details::e_neg,branch); } } else if (token_t::e_add == current_token().type) { next_token(); branch = parse_expression(e_level13); } else if (token_t::e_eof == current_token().type) { set_error( make_error(parser_error::e_syntax, current_token(), "ERR196 - Premature end of expression[1]", exprtk_error_location)); return error_node(); } else { set_error( make_error(parser_error::e_syntax, current_token(), "ERR197 - Premature end of expression[2]", exprtk_error_location)); return error_node(); } if ( branch && (e_level00 == precedence) && token_is(token_t::e_ternary,prsrhlpr_t::e_hold) ) { branch = parse_ternary_conditional_statement(branch); } parse_pending_string_rangesize(branch); return branch; } template <typename Type> class expression_generator { public: typedef details::expression_node<Type>* expression_node_ptr; typedef expression_node_ptr (*synthesize_functor_t)(expression_generator<T>&, const details::operator_type& operation, expression_node_ptr (&branch)[2]); typedef std::map<std::string,synthesize_functor_t> synthesize_map_t; typedef typename exprtk::parser<Type> parser_t; typedef const Type& vtype; typedef const Type ctype; inline void init_synthesize_map() { #ifndef exprtk_disable_enhanced_features synthesize_map_["(v)o(v)"] = synthesize_vov_expression::process; synthesize_map_["(c)o(v)"] = synthesize_cov_expression::process; synthesize_map_["(v)o(c)"] = synthesize_voc_expression::process; #define register_synthezier(S) \ synthesize_map_[S ::node_type::id()] = S ::process; \ 
register_synthezier(synthesize_vovov_expression0) register_synthezier(synthesize_vovov_expression1) register_synthezier(synthesize_vovoc_expression0) register_synthezier(synthesize_vovoc_expression1) register_synthezier(synthesize_vocov_expression0) register_synthezier(synthesize_vocov_expression1) register_synthezier(synthesize_covov_expression0) register_synthezier(synthesize_covov_expression1) register_synthezier(synthesize_covoc_expression0) register_synthezier(synthesize_covoc_expression1) register_synthezier(synthesize_cocov_expression1) register_synthezier(synthesize_vococ_expression0) register_synthezier(synthesize_vovovov_expression0) register_synthezier(synthesize_vovovoc_expression0) register_synthezier(synthesize_vovocov_expression0) register_synthezier(synthesize_vocovov_expression0) register_synthezier(synthesize_covovov_expression0) register_synthezier(synthesize_covocov_expression0) register_synthezier(synthesize_vocovoc_expression0) register_synthezier(synthesize_covovoc_expression0) register_synthezier(synthesize_vococov_expression0) register_synthezier(synthesize_vovovov_expression1) register_synthezier(synthesize_vovovoc_expression1) register_synthezier(synthesize_vovocov_expression1) register_synthezier(synthesize_vocovov_expression1) register_synthezier(synthesize_covovov_expression1) register_synthezier(synthesize_covocov_expression1) register_synthezier(synthesize_vocovoc_expression1) register_synthezier(synthesize_covovoc_expression1) register_synthezier(synthesize_vococov_expression1) register_synthezier(synthesize_vovovov_expression2) register_synthezier(synthesize_vovovoc_expression2) register_synthezier(synthesize_vovocov_expression2) register_synthezier(synthesize_vocovov_expression2) register_synthezier(synthesize_covovov_expression2) register_synthezier(synthesize_covocov_expression2) register_synthezier(synthesize_vocovoc_expression2) register_synthezier(synthesize_covovoc_expression2) 
register_synthezier(synthesize_vovovov_expression3) register_synthezier(synthesize_vovovoc_expression3) register_synthezier(synthesize_vovocov_expression3) register_synthezier(synthesize_vocovov_expression3) register_synthezier(synthesize_covovov_expression3) register_synthezier(synthesize_covocov_expression3) register_synthezier(synthesize_vocovoc_expression3) register_synthezier(synthesize_covovoc_expression3) register_synthezier(synthesize_vococov_expression3) register_synthezier(synthesize_vovovov_expression4) register_synthezier(synthesize_vovovoc_expression4) register_synthezier(synthesize_vovocov_expression4) register_synthezier(synthesize_vocovov_expression4) register_synthezier(synthesize_covovov_expression4) register_synthezier(synthesize_covocov_expression4) register_synthezier(synthesize_vocovoc_expression4) register_synthezier(synthesize_covovoc_expression4) #endif } inline void set_parser(parser_t& p) { parser_ = &p; } inline void set_uom(unary_op_map_t& unary_op_map) { unary_op_map_ = &unary_op_map; } inline void set_bom(binary_op_map_t& binary_op_map) { binary_op_map_ = &binary_op_map; } inline void set_ibom(inv_binary_op_map_t& inv_binary_op_map) { inv_binary_op_map_ = &inv_binary_op_map; } inline void set_sf3m(sf3_map_t& sf3_map) { sf3_map_ = &sf3_map; } inline void set_sf4m(sf4_map_t& sf4_map) { sf4_map_ = &sf4_map; } inline void set_allocator(details::node_allocator& na) { node_allocator_ = &na; } inline void set_strength_reduction_state(const bool enabled) { strength_reduction_enabled_ = enabled; } inline bool strength_reduction_enabled() const { return strength_reduction_enabled_; } inline bool valid_operator(const details::operator_type& operation, binary_functor_t& bop) { typename binary_op_map_t::iterator bop_itr = binary_op_map_->find(operation); if ((*binary_op_map_).end() == bop_itr) return false; bop = bop_itr->second; return true; } inline bool valid_operator(const details::operator_type& operation, unary_functor_t& uop) { typename 
unary_op_map_t::iterator uop_itr = unary_op_map_->find(operation); if ((*unary_op_map_).end() == uop_itr) return false; uop = uop_itr->second; return true; } inline details::operator_type get_operator(const binary_functor_t& bop) { return (*inv_binary_op_map_).find(bop)->second; } inline expression_node_ptr operator() (const Type& v) const { return node_allocator_->allocate<literal_node_t>(v); } #ifndef exprtk_disable_string_capabilities inline expression_node_ptr operator() (const std::string& s) const { return node_allocator_->allocate<string_literal_node_t>(s); } inline expression_node_ptr operator() (std::string& s, range_t& rp) const { return node_allocator_->allocate_rr<string_range_node_t>(s,rp); } inline expression_node_ptr operator() (const std::string& s, range_t& rp) const { return node_allocator_->allocate_tt<const_string_range_node_t>(s,rp); } inline expression_node_ptr operator() (expression_node_ptr branch, range_t& rp) const { if (is_generally_string_node(branch)) return node_allocator_->allocate_tt<generic_string_range_node_t>(branch,rp); else return error_node(); } #endif inline bool unary_optimisable(const details::operator_type& operation) const { return (details::e_abs == operation) || (details::e_acos == operation) || (details::e_acosh == operation) || (details::e_asin == operation) || (details::e_asinh == operation) || (details::e_atan == operation) || (details::e_atanh == operation) || (details::e_ceil == operation) || (details::e_cos == operation) || (details::e_cosh == operation) || (details::e_exp == operation) || (details::e_expm1 == operation) || (details::e_floor == operation) || (details::e_log == operation) || (details::e_log10 == operation) || (details::e_log2 == operation) || (details::e_log1p == operation) || (details::e_neg == operation) || (details::e_pos == operation) || (details::e_round == operation) || (details::e_sin == operation) || (details::e_sinc == operation) || (details::e_sinh == operation) || (details::e_sqrt == 
operation) || (details::e_tan == operation) || (details::e_tanh == operation) || (details::e_cot == operation) || (details::e_sec == operation) || (details::e_csc == operation) || (details::e_r2d == operation) || (details::e_d2r == operation) || (details::e_d2g == operation) || (details::e_g2d == operation) || (details::e_notl == operation) || (details::e_sgn == operation) || (details::e_erf == operation) || (details::e_erfc == operation) || (details::e_ncdf == operation) || (details::e_frac == operation) || (details::e_trunc == operation) ; } inline bool sf3_optimisable(const std::string& sf3id, trinary_functor_t& tfunc) { typename sf3_map_t::iterator itr = sf3_map_->find(sf3id); if (sf3_map_->end() == itr) return false; else tfunc = itr->second.first; return true; } inline bool sf4_optimisable(const std::string& sf4id, quaternary_functor_t& qfunc) { typename sf4_map_t::iterator itr = sf4_map_->find(sf4id); if (sf4_map_->end() == itr) return false; else qfunc = itr->second.first; return true; } inline bool sf3_optimisable(const std::string& sf3id, details::operator_type& operation) { typename sf3_map_t::iterator itr = sf3_map_->find(sf3id); if (sf3_map_->end() == itr) return false; else operation = itr->second.second; return true; } inline bool sf4_optimisable(const std::string& sf4id, details::operator_type& operation) { typename sf4_map_t::iterator itr = sf4_map_->find(sf4id); if (sf4_map_->end() == itr) return false; else operation = itr->second.second; return true; } inline expression_node_ptr operator() (const details::operator_type& operation, expression_node_ptr (&branch)[1]) { if (0 == branch[0]) { return error_node(); } else if (details::is_null_node(branch[0])) { return branch[0]; } else if (details::is_break_node(branch[0])) { return error_node(); } else if (details::is_continue_node(branch[0])) { return error_node(); } else if (details::is_constant_node(branch[0])) { return synthesize_expression<unary_node_t,1>(operation,branch); } else if 
(unary_optimisable(operation) && details::is_variable_node(branch[0])) { return synthesize_uv_expression(operation,branch); } else if (unary_optimisable(operation) && details::is_ivector_node(branch[0])) { return synthesize_uvec_expression(operation,branch); } else return synthesize_unary_expression(operation,branch); } inline bool is_assignment_operation(const details::operator_type& operation) const { return ( (details::e_addass == operation) || (details::e_subass == operation) || (details::e_mulass == operation) || (details::e_divass == operation) || (details::e_modass == operation) ) && parser_->settings_.assignment_enabled(operation); } #ifndef exprtk_disable_string_capabilities inline bool valid_string_operation(const details::operator_type& operation) const { return (details::e_add == operation) || (details::e_lt == operation) || (details::e_lte == operation) || (details::e_gt == operation) || (details::e_gte == operation) || (details::e_eq == operation) || (details::e_ne == operation) || (details::e_in == operation) || (details::e_like == operation) || (details::e_ilike == operation) || (details::e_assign == operation) || (details::e_addass == operation) || (details::e_swap == operation) ; } #else inline bool valid_string_operation(const details::operator_type&) const { return false; } #endif inline std::string to_str(const details::operator_type& operation) const { switch (operation) { case details::e_add : return "+" ; case details::e_sub : return "-" ; case details::e_mul : return "*" ; case details::e_div : return "/" ; case details::e_mod : return "%" ; case details::e_pow : return "^" ; case details::e_lt : return "<" ; case details::e_lte : return "<=" ; case details::e_gt : return ">" ; case details::e_gte : return ">=" ; case details::e_eq : return "==" ; case details::e_ne : return "!=" ; case details::e_and : return "and" ; case details::e_nand : return "nand" ; case details::e_or : return "or" ; case details::e_nor : return "nor" ; case 
details::e_xor : return "xor" ; case details::e_xnor : return "xnor" ; default : return "UNKNOWN"; } } inline bool operation_optimisable(const details::operator_type& operation) const { return (details::e_add == operation) || (details::e_sub == operation) || (details::e_mul == operation) || (details::e_div == operation) || (details::e_mod == operation) || (details::e_pow == operation) || (details::e_lt == operation) || (details::e_lte == operation) || (details::e_gt == operation) || (details::e_gte == operation) || (details::e_eq == operation) || (details::e_ne == operation) || (details::e_and == operation) || (details::e_nand == operation) || (details::e_or == operation) || (details::e_nor == operation) || (details::e_xor == operation) || (details::e_xnor == operation) ; } inline std::string branch_to_id(expression_node_ptr branch) { static const std::string null_str ("(null)" ); static const std::string const_str ("(c)" ); static const std::string var_str ("(v)" ); static const std::string vov_str ("(vov)" ); static const std::string cov_str ("(cov)" ); static const std::string voc_str ("(voc)" ); static const std::string str_str ("(s)" ); static const std::string strrng_str ("(rngs)" ); static const std::string cs_str ("(cs)" ); static const std::string cstrrng_str("(crngs)"); if (details::is_null_node(branch)) return null_str; else if (details::is_constant_node(branch)) return const_str; else if (details::is_variable_node(branch)) return var_str; else if (details::is_vov_node(branch)) return vov_str; else if (details::is_cov_node(branch)) return cov_str; else if (details::is_voc_node(branch)) return voc_str; else if (details::is_string_node(branch)) return str_str; else if (details::is_const_string_node(branch)) return cs_str; else if (details::is_string_range_node(branch)) return strrng_str; else if (details::is_const_string_range_node(branch)) return cstrrng_str; else if (details::is_t0ot1ot2_node(branch)) return "(" + 
// NOTE(review): tail of branch_to_id(expression_node_ptr) - the head of this
// function lies on the preceding line. Composite T0oT1oT2(oT3) nodes report
// their own id via type_id(); any unrecognised node maps to "ERROR".
dynamic_cast<details::T0oT1oT2_base_node<T>*>(branch)->type_id() + ")";
else if (details::is_t0ot1ot2ot3_node(branch))
   return "(" + dynamic_cast<details::T0oT1oT2oT3_base_node<T>*>(branch)->type_id() + ")";
else
   return "ERROR";
}

// Concatenates the ids of both operands, e.g. "(v)o(c)" - this string is the
// lookup key used by the specialised-node synthesis machinery.
inline std::string branch_to_id(expression_node_ptr (&branch)[2])
{
   return branch_to_id(branch[0]) + std::string("o") + branch_to_id(branch[1]);
}

// c o v : optimisable operation with a constant LHS and a variable RHS.
inline bool cov_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return details::is_constant_node(branch[0]) &&
             details::is_variable_node(branch[1]) ;
}

// v o c : optimisable operation with a variable LHS and a constant RHS.
inline bool voc_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return details::is_variable_node(branch[0]) &&
             details::is_constant_node(branch[1]) ;
}

// v o v : optimisable operation with variables on both sides.
inline bool vov_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return details::is_variable_node(branch[0]) &&
             details::is_variable_node(branch[1]) ;
}

// c o b : constant LHS combined with an arbitrary non-constant RHS branch.
inline bool cob_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return details::is_constant_node(branch[0]) &&
             !details::is_constant_node(branch[1]) ;
}

// b o c : arbitrary non-constant LHS branch combined with a constant RHS.
inline bool boc_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return !details::is_constant_node(branch[0]) &&
             details::is_constant_node(branch[1]) ;
}

// c o cob / cob o c : a constant folded against an existing cob node.
// Only the four basic arithmetic operations are eligible.
inline bool cocob_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (
        (details::e_add == operation) ||
        (details::e_sub == operation) ||
        (details::e_mul == operation) ||
        (details::e_div == operation)
      )
   {
      return (details::is_constant_node(branch[0]) && details::is_cob_node(branch[1])) ||
             (details::is_constant_node(branch[1]) && details::is_cob_node(branch[0])) ;
   }
   else
      return false;
}

// c o boc / boc o c : a constant folded against an existing boc node.
// Only the four basic arithmetic operations are eligible.
inline bool coboc_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (
        (details::e_add == operation) ||
        (details::e_sub == operation) ||
        (details::e_mul == operation) ||
        (details::e_div == operation)
      )
   {
      return (details::is_constant_node(branch[0]) && details::is_boc_node(branch[1])) ||
             (details::is_constant_node(branch[1]) && details::is_boc_node(branch[0])) ;
   }
   else
      return false;
}

// uv o uv : both operands are unary-variable nodes.
inline bool uvouv_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return details::is_uv_node(branch[0]) &&
             details::is_uv_node(branch[1]) ;
}

// v o b : variable LHS combined with an arbitrary non-variable RHS branch.
inline bool vob_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return details::is_variable_node(branch[0]) &&
             !details::is_variable_node(branch[1]) ;
}

// b o v : arbitrary non-variable LHS branch combined with a variable RHS.
inline bool bov_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return !details::is_variable_node(branch[0]) &&
             details::is_variable_node(branch[1]) ;
}

// Binary-extension fallback: optimisable so long as at least one side is
// non-constant (a fully constant pair is handled elsewhere).
inline bool binext_optimisable(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!operation_optimisable(operation))
      return false;
   else
      return !details::is_constant_node(branch[0]) ||
             !details::is_constant_node(branch[1]) ;
}

// Rejects assignment operations whose LHS is not an assignable entity or that
// mix string and numeric operands.
// NOTE(review): the body of this function continues on the next line.
inline bool is_invalid_assignment_op(const details::operator_type& operation, expression_node_ptr (&branch)[2])
{
   if (is_assignment_operation(operation))
   {
      const bool b1_is_genstring = details::is_generally_string_node(branch[1]);

      if (details::is_string_node(branch[0]))
         return !b1_is_genstring;
      else
         return (
                  !details::is_variable_node    (branch[0]) &&
                  !details::is_vector_elem_node (branch[0]) &&
                  !details::is_rebasevector_elem_node
// NOTE(review): tail of is_invalid_assignment_op - the head of this function
// lies on the preceding line.
(branch[0]) &&
                  !details::is_rebasevector_celem_node(branch[0]) &&
                  !details::is_vector_node            (branch[0])
                )
                || b1_is_genstring;
   }
   else
      return false;
}

// True when the operation is x^c with a literal exponent c accepted by
// cardinal_pow_optimisable, and the base is a non-trivial sub-expression
// (not a constant, variable, vector or string).
inline bool is_constpow_operation(const details::operator_type& operation, expression_node_ptr(&branch)[2])
{
   if (
        !details::is_constant_node(branch[1]) ||
         details::is_constant_node(branch[0]) ||
         details::is_variable_node(branch[0]) ||
         details::is_vector_node  (branch[0]) ||
         details::is_generally_string_node(branch[0])
      )
      return false;

   const Type c = static_cast<details::literal_node<Type>*>(branch[1])->value();

   return cardinal_pow_optimisable(operation, c);
}

// Break/continue nodes may not appear as operands of a binary operation.
inline bool is_invalid_break_continue_op(expression_node_ptr (&branch)[2])
{
   return (
            details::is_break_node   (branch[0]) ||
            details::is_break_node   (branch[1]) ||
            details::is_continue_node(branch[0]) ||
            details::is_continue_node(branch[1])
          );
}

// A binary string operation is invalid when the operands mix string and
// non-string types, or when both are strings but the operator has no string
// semantics. Registers a synthesis error on the parser when invalid.
inline bool is_invalid_string_op(const details::operator_type& operation, expression_node_ptr (&branch)[2])
{
   const bool b0_string = is_generally_string_node(branch[0]);
   const bool b1_string = is_generally_string_node(branch[1]);

   bool result = false;

   if (b0_string != b1_string)
      result = true;
   else if (!valid_string_operation(operation) && b0_string && b1_string)
      result = true;

   if (result)
   {
      parser_->set_synthesis_error("Invalid string operation");
   }

   return result;
}

// Trinary variant: all three operands must agree on string-ness, and the only
// string-capable trinary operation is 'inrange'.
inline bool is_invalid_string_op(const details::operator_type& operation, expression_node_ptr (&branch)[3])
{
   const bool b0_string = is_generally_string_node(branch[0]);
   const bool b1_string = is_generally_string_node(branch[1]);
   const bool b2_string = is_generally_string_node(branch[2]);

   bool result = false;

   if ((b0_string != b1_string) || (b1_string != b2_string))
      result = true;
   else if ((details::e_inrange != operation) && b0_string && b1_string && b2_string)
      result = true;

   if (result)
   {
      parser_->set_synthesis_error("Invalid string operation");
   }

   return result;
}

// True when both operands are strings and the operator has string semantics.
inline bool is_string_operation(const details::operator_type& operation, expression_node_ptr (&branch)[2])
{
   const bool b0_string = is_generally_string_node(branch[0]);
   const bool b1_string = is_generally_string_node(branch[1]);

   return (b0_string && b1_string && valid_string_operation(operation));
}

// Trinary variant: all three operands are strings and the op is 'inrange'.
inline bool is_string_operation(const details::operator_type& operation, expression_node_ptr (&branch)[3])
{
   const bool b0_string = is_generally_string_node(branch[0]);
   const bool b1_string = is_generally_string_node(branch[1]);
   const bool b2_string = is_generally_string_node(branch[2]);

   return (b0_string && b1_string && b2_string && (details::e_inrange == operation));
}

#ifndef exprtk_disable_sc_andor
// Short-circuit 'and'/'or' take a dedicated synthesis path.
inline bool is_shortcircuit_expression(const details::operator_type& operation) const
{
   return (
            (details::e_scand == operation) ||
            (details::e_scor  == operation)
          );
}
#else
inline bool is_shortcircuit_expression(const details::operator_type&) const
{
   return false;
}
#endif

// True when either operand is the null node.
inline bool is_null_present(expression_node_ptr (&branch)[2]) const
{
   return (
            details::is_null_node(branch[0]) ||
            details::is_null_node(branch[1])
          );
}

// Vector comparison/equality/logic operations require at least one vector
// operand and one of the eligible operators below.
inline bool is_vector_eqineq_logic_operation(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!is_ivector_node(branch[0]) && !is_ivector_node(branch[1]))
      return false;
   else
      return (
               (details::e_lt    == operation) ||
               (details::e_lte   == operation) ||
               (details::e_gt    == operation) ||
               (details::e_gte   == operation) ||
               (details::e_eq    == operation) ||
               (details::e_ne    == operation) ||
               (details::e_equal == operation) ||
               (details::e_and   == operation) ||
               (details::e_nand  == operation) ||
               (details::e_or    == operation) ||
               (details::e_nor   == operation) ||
               (details::e_xor   == operation) ||
               (details::e_xnor  == operation)
             );
}

// Vector arithmetic requires at least one vector operand.
// NOTE(review): the body of this function continues on the next line.
inline bool is_vector_arithmetic_operation(const details::operator_type& operation, expression_node_ptr (&branch)[2]) const
{
   if (!is_ivector_node(branch[0]) && !is_ivector_node(branch[1]))
      return false;
   else
      return (
               (details::e_add == operation) ||
               (details::e_sub == operation) ||
               (details::e_mul ==
operation) || (details::e_div == operation) || (details::e_pow == operation) ); } inline expression_node_ptr operator() (const details::operator_type& operation, expression_node_ptr (&branch)[2]) { if ((0 == branch[0]) || (0 == branch[1])) { return error_node(); } else if (is_invalid_string_op(operation,branch)) { return error_node(); } else if (is_invalid_assignment_op(operation,branch)) { return error_node(); } else if (is_invalid_break_continue_op(branch)) { return error_node(); } else if (details::e_assign == operation) { return synthesize_assignment_expression(operation, branch); } else if (details::e_swap == operation) { return synthesize_swap_expression(branch); } else if (is_assignment_operation(operation)) { return synthesize_assignment_operation_expression(operation, branch); } else if (is_vector_eqineq_logic_operation(operation, branch)) { return synthesize_veceqineqlogic_operation_expression(operation, branch); } else if (is_vector_arithmetic_operation(operation, branch)) { return synthesize_vecarithmetic_operation_expression(operation, branch); } else if (is_shortcircuit_expression(operation)) { return synthesize_shortcircuit_expression(operation, branch); } else if (is_string_operation(operation, branch)) { return synthesize_string_expression(operation, branch); } else if (is_null_present(branch)) { return synthesize_null_expression(operation, branch); } #ifndef exprtk_disable_cardinal_pow_optimisation else if (is_constpow_operation(operation, branch)) { return cardinal_pow_optimisation(branch); } #endif expression_node_ptr result = error_node(); #ifndef exprtk_disable_enhanced_features if (synthesize_expression(operation, branch, result)) { return result; } else #endif { /* Possible reductions: 1. c o cob -> cob 2. cob o c -> cob 3. c o boc -> boc 4. 
boc o c -> boc */ result = error_node(); if (cocob_optimisable(operation, branch)) { result = synthesize_cocob_expression::process((*this), operation, branch); } else if (coboc_optimisable(operation, branch) && (0 == result)) { result = synthesize_coboc_expression::process((*this), operation, branch); } if (result) return result; } if (uvouv_optimisable(operation, branch)) { return synthesize_uvouv_expression(operation, branch); } else if (vob_optimisable(operation, branch)) { return synthesize_vob_expression::process((*this), operation, branch); } else if (bov_optimisable(operation, branch)) { return synthesize_bov_expression::process((*this), operation, branch); } else if (cob_optimisable(operation, branch)) { return synthesize_cob_expression::process((*this), operation, branch); } else if (boc_optimisable(operation, branch)) { return synthesize_boc_expression::process((*this), operation, branch); } #ifndef exprtk_disable_enhanced_features else if (cov_optimisable(operation, branch)) { return synthesize_cov_expression::process((*this), operation, branch); } #endif else if (binext_optimisable(operation, branch)) { return synthesize_binary_ext_expression::process((*this), operation, branch); } else return synthesize_expression<binary_node_t,2>(operation, branch); } inline expression_node_ptr operator() (const details::operator_type& operation, expression_node_ptr (&branch)[3]) { if ( (0 == branch[0]) || (0 == branch[1]) || (0 == branch[2]) ) { details::free_all_nodes(*node_allocator_,branch); return error_node(); } else if (is_invalid_string_op(operation, branch)) { return error_node(); } else if (is_string_operation(operation, branch)) { return synthesize_string_expression(operation, branch); } else return synthesize_expression<trinary_node_t,3>(operation, branch); } inline expression_node_ptr operator() (const details::operator_type& operation, expression_node_ptr (&branch)[4]) { return synthesize_expression<quaternary_node_t,4>(operation,branch); } inline 
expression_node_ptr operator() (const details::operator_type& operation, expression_node_ptr b0) { expression_node_ptr branch[1] = { b0 }; return (*this)(operation,branch); } inline expression_node_ptr operator() (const details::operator_type& operation, expression_node_ptr b0, expression_node_ptr b1) { if ((0 == b0) || (0 == b1)) return error_node(); else { expression_node_ptr branch[2] = { b0, b1 }; return expression_generator<Type>::operator()(operation,branch); } } inline expression_node_ptr conditional(expression_node_ptr condition, expression_node_ptr consequent, expression_node_ptr alternative) const { if ((0 == condition) || (0 == consequent)) { free_node(*node_allocator_, condition); free_node(*node_allocator_, consequent); free_node(*node_allocator_, alternative); return error_node(); } // Can the condition be immediately evaluated? if so optimise. else if (details::is_constant_node(condition)) { // True branch if (details::is_true(condition)) { free_node(*node_allocator_, condition); free_node(*node_allocator_, alternative); return consequent; } // False branch else { free_node(*node_allocator_, condition); free_node(*node_allocator_, consequent); if (alternative) return alternative; else return node_allocator_->allocate<details::null_node<T> >(); } } else if ((0 != consequent) && (0 != alternative)) { return node_allocator_-> allocate<conditional_node_t>(condition, consequent, alternative); } else return node_allocator_-> allocate<cons_conditional_node_t>(condition, consequent); } #ifndef exprtk_disable_string_capabilities inline expression_node_ptr conditional_string(expression_node_ptr condition, expression_node_ptr consequent, expression_node_ptr alternative) const { if ((0 == condition) || (0 == consequent)) { free_node(*node_allocator_, condition); free_node(*node_allocator_, consequent); free_node(*node_allocator_, alternative); return error_node(); } // Can the condition be immediately evaluated? if so optimise. 
// NOTE(review): tail of conditional_string - the head of this function lies on
// the preceding line. Folds a string-valued ternary whose condition is a
// compile-time constant.
else if (details::is_constant_node(condition))
{
   // True branch
   if (details::is_true(condition))
   {
      free_node(*node_allocator_, condition);
      free_node(*node_allocator_, alternative);
      return consequent;
   }
   // False branch
   else
   {
      free_node(*node_allocator_, condition);
      free_node(*node_allocator_, consequent);
      if (alternative)
         return alternative;
      else
         // No alternative given: fold to an empty string literal.
         return node_allocator_-> allocate_c<details::string_literal_node<Type> >("");
   }
}
else if ((0 != consequent) && (0 != alternative))
   return node_allocator_-> allocate<conditional_string_node_t>(condition, consequent, alternative);
else
   return error_node();
}
#else
// String capabilities disabled: string ternaries can never be synthesised.
inline expression_node_ptr conditional_string(expression_node_ptr, expression_node_ptr, expression_node_ptr) const
{
   return error_node();
}
#endif

// Synthesises a while-loop node. A constant condition is folded: constant-true
// would be an infinite loop and yields an error node; constant-false collapses
// to a null node. A null condition degenerates the loop to its body.
// 'brkcont' selects the break/continue capable node variant.
inline expression_node_ptr while_loop(expression_node_ptr& condition, expression_node_ptr& branch, const bool brkcont = false) const
{
   if (!brkcont && details::is_constant_node(condition))
   {
      expression_node_ptr result = error_node();

      if (details::is_true(condition))
         // Infinite loops are not allowed.
         result = error_node();
      else
         result = node_allocator_->allocate<details::null_node<Type> >();

      free_node(*node_allocator_, condition);
      free_node(*node_allocator_, branch);

      return result;
   }
   else if (details::is_null_node(condition))
   {
      free_node(*node_allocator_,condition);
      return branch;
   }
   else if (!brkcont)
      return node_allocator_->allocate<while_loop_node_t>(condition,branch);
#ifndef exprtk_disable_break_continue
   else
      return node_allocator_->allocate<while_loop_bc_node_t>(condition,branch);
#else
   return error_node();
#endif
}

// Synthesises a repeat-until loop node. With a constant condition: constant-
// true over a constant body reduces to the body itself; any other constant
// condition is rejected as a potential infinite loop. A null condition
// degenerates the loop to its body. 'brkcont' selects the break/continue
// capable node variant.
inline expression_node_ptr repeat_until_loop(expression_node_ptr& condition, expression_node_ptr& branch, const bool brkcont = false) const
{
   if (!brkcont && details::is_constant_node(condition))
   {
      if (
           details::is_true(condition) &&
           details::is_constant_node(branch)
         )
      {
         free_node(*node_allocator_,condition);
         return branch;
      }

      free_node(*node_allocator_, condition);
      free_node(*node_allocator_, branch);

      return error_node();
   }
   else if (details::is_null_node(condition))
   {
      free_node(*node_allocator_,condition);
      return branch;
   }
   else if (!brkcont)
      return node_allocator_->allocate<repeat_until_loop_node_t>(condition,branch);
#ifndef exprtk_disable_break_continue
   else
      return node_allocator_->allocate<repeat_until_loop_bc_node_t>(condition,branch);
#else
   return error_node();
#endif
}

// Synthesises a for-loop node; same constant-condition folding policy as
// while_loop. NOTE(review): the body of this function continues on the next
// line.
inline expression_node_ptr for_loop(expression_node_ptr& initialiser,
                                    expression_node_ptr& condition,
                                    expression_node_ptr& incrementor,
                                    expression_node_ptr& loop_body,
                                    bool brkcont = false) const
{
   if (!brkcont && details::is_constant_node(condition))
   {
      expression_node_ptr result = error_node();

      if (details::is_true(condition)) // Infinite loops are not allowed.
result = error_node(); else result = node_allocator_->allocate<details::null_node<Type> >(); free_node(*node_allocator_, initialiser); free_node(*node_allocator_, condition); free_node(*node_allocator_, incrementor); free_node(*node_allocator_, loop_body); return result; } else if (details::is_null_node(condition)) { free_node(*node_allocator_, initialiser); free_node(*node_allocator_, condition); free_node(*node_allocator_, incrementor); return loop_body; } else if (!brkcont) return node_allocator_->allocate<for_loop_node_t> ( initialiser, condition, incrementor, loop_body ); #ifndef exprtk_disable_break_continue else return node_allocator_->allocate<for_loop_bc_node_t> ( initialiser, condition, incrementor, loop_body ); #else return error_node(); #endif } template <typename Allocator, template <typename,typename> class Sequence> inline expression_node_ptr const_optimise_switch(Sequence<expression_node_ptr,Allocator>& arg_list) { expression_node_ptr result = error_node(); for (std::size_t i = 0; i < (arg_list.size() / 2); ++i) { expression_node_ptr condition = arg_list[(2 * i) ]; expression_node_ptr consequent = arg_list[(2 * i) + 1]; if ((0 == result) && details::is_true(condition)) { result = consequent; break; } } if (0 == result) { result = arg_list.back(); } for (std::size_t i = 0; i < arg_list.size(); ++i) { expression_node_ptr current_expr = arg_list[i]; if (current_expr && (current_expr != result)) { free_node(*node_allocator_,current_expr); } } return result; } template <typename Allocator, template <typename,typename> class Sequence> inline expression_node_ptr const_optimise_mswitch(Sequence<expression_node_ptr,Allocator>& arg_list) { expression_node_ptr result = error_node(); for (std::size_t i = 0; i < (arg_list.size() / 2); ++i) { expression_node_ptr condition = arg_list[(2 * i) ]; expression_node_ptr consequent = arg_list[(2 * i) + 1]; if (details::is_true(condition)) { result = consequent; } } if (0 == result) { T zero = T(0); result = 
node_allocator_->allocate<literal_node_t>(zero); } for (std::size_t i = 0; i < arg_list.size(); ++i) { expression_node_ptr& current_expr = arg_list[i]; if (current_expr && (current_expr != result)) { free_node(*node_allocator_,current_expr); } } return result; } struct switch_nodes { typedef std::vector<expression_node_ptr> arg_list_t; #define case_stmt(N) \ if (is_true(arg[(2 * N)])) { return arg[(2 * N) + 1]->value(); } \ struct switch_1 { static inline T process(const arg_list_t& arg) { case_stmt(0) return arg.back()->value(); } }; struct switch_2 { static inline T process(const arg_list_t& arg) { case_stmt(0) case_stmt(1) return arg.back()->value(); } }; struct switch_3 { static inline T process(const arg_list_t& arg) { case_stmt(0) case_stmt(1) case_stmt(2) return arg.back()->value(); } }; struct switch_4 { static inline T process(const arg_list_t& arg) { case_stmt(0) case_stmt(1) case_stmt(2) case_stmt(3) return arg.back()->value(); } }; struct switch_5 { static inline T process(const arg_list_t& arg) { case_stmt(0) case_stmt(1) case_stmt(2) case_stmt(3) case_stmt(4) return arg.back()->value(); } }; struct switch_6 { static inline T process(const arg_list_t& arg) { case_stmt(0) case_stmt(1) case_stmt(2) case_stmt(3) case_stmt(4) case_stmt(5) return arg.back()->value(); } }; struct switch_7 { static inline T process(const arg_list_t& arg) { case_stmt(0) case_stmt(1) case_stmt(2) case_stmt(3) case_stmt(4) case_stmt(5) case_stmt(6) return arg.back()->value(); } }; #undef case_stmt }; template <typename Allocator, template <typename,typename> class Sequence> inline expression_node_ptr switch_statement(Sequence<expression_node_ptr,Allocator>& arg_list) { if (arg_list.empty()) return error_node(); else if ( !all_nodes_valid(arg_list) || (arg_list.size() < 3) || ((arg_list.size() % 2) != 1) ) { details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } else if (is_constant_foldable(arg_list)) return const_optimise_switch(arg_list); switch 
((arg_list.size() - 1) / 2) { #define case_stmt(N) \ case N : \ return node_allocator_-> \ allocate<details::switch_n_node \ <Type,typename switch_nodes::switch_##N> >(arg_list); \ case_stmt(1) case_stmt(2) case_stmt(3) case_stmt(4) case_stmt(5) case_stmt(6) case_stmt(7) #undef case_stmt default : return node_allocator_->allocate<details::switch_node<Type> >(arg_list); } } template <typename Allocator, template <typename,typename> class Sequence> inline expression_node_ptr multi_switch_statement(Sequence<expression_node_ptr,Allocator>& arg_list) { if (!all_nodes_valid(arg_list)) { details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } else if (is_constant_foldable(arg_list)) return const_optimise_mswitch(arg_list); else return node_allocator_->allocate<details::multi_switch_node<Type> >(arg_list); } #define unary_opr_switch_statements \ case_stmt(details:: e_abs, details:: abs_op) \ case_stmt(details:: e_acos, details:: acos_op) \ case_stmt(details::e_acosh, details::acosh_op) \ case_stmt(details:: e_asin, details:: asin_op) \ case_stmt(details::e_asinh, details::asinh_op) \ case_stmt(details:: e_atan, details:: atan_op) \ case_stmt(details::e_atanh, details::atanh_op) \ case_stmt(details:: e_ceil, details:: ceil_op) \ case_stmt(details:: e_cos, details:: cos_op) \ case_stmt(details:: e_cosh, details:: cosh_op) \ case_stmt(details:: e_exp, details:: exp_op) \ case_stmt(details::e_expm1, details::expm1_op) \ case_stmt(details::e_floor, details::floor_op) \ case_stmt(details:: e_log, details:: log_op) \ case_stmt(details::e_log10, details::log10_op) \ case_stmt(details:: e_log2, details:: log2_op) \ case_stmt(details::e_log1p, details::log1p_op) \ case_stmt(details:: e_neg, details:: neg_op) \ case_stmt(details:: e_pos, details:: pos_op) \ case_stmt(details::e_round, details::round_op) \ case_stmt(details:: e_sin, details:: sin_op) \ case_stmt(details:: e_sinc, details:: sinc_op) \ case_stmt(details:: e_sinh, details:: sinh_op) \ 
case_stmt(details:: e_sqrt, details:: sqrt_op) \ case_stmt(details:: e_tan, details:: tan_op) \ case_stmt(details:: e_tanh, details:: tanh_op) \ case_stmt(details:: e_cot, details:: cot_op) \ case_stmt(details:: e_sec, details:: sec_op) \ case_stmt(details:: e_csc, details:: csc_op) \ case_stmt(details:: e_r2d, details:: r2d_op) \ case_stmt(details:: e_d2r, details:: d2r_op) \ case_stmt(details:: e_d2g, details:: d2g_op) \ case_stmt(details:: e_g2d, details:: g2d_op) \ case_stmt(details:: e_notl, details:: notl_op) \ case_stmt(details:: e_sgn, details:: sgn_op) \ case_stmt(details:: e_erf, details:: erf_op) \ case_stmt(details:: e_erfc, details:: erfc_op) \ case_stmt(details:: e_ncdf, details:: ncdf_op) \ case_stmt(details:: e_frac, details:: frac_op) \ case_stmt(details::e_trunc, details::trunc_op) \ inline expression_node_ptr synthesize_uv_expression(const details::operator_type& operation, expression_node_ptr (&branch)[1]) { T& v = static_cast<details::variable_node<T>*>(branch[0])->ref(); switch (operation) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate<typename details::unary_variable_node<Type,op1<Type> > >(v); \ unary_opr_switch_statements #undef case_stmt default : return error_node(); } } inline expression_node_ptr synthesize_uvec_expression(const details::operator_type& operation, expression_node_ptr (&branch)[1]) { switch (operation) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate<typename details::unary_vector_node<Type,op1<Type> > > \ (operation, branch[0]); \ unary_opr_switch_statements #undef case_stmt default : return error_node(); } } inline expression_node_ptr synthesize_unary_expression(const details::operator_type& operation, expression_node_ptr (&branch)[1]) { switch (operation) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate<typename details::unary_branch_node<Type,op1<Type> > >(branch[0]); \ unary_opr_switch_statements #undef case_stmt default : return 
error_node(); } } inline expression_node_ptr const_optimise_sf3(const details::operator_type& operation, expression_node_ptr (&branch)[3]) { expression_node_ptr temp_node = error_node(); switch (operation) { #define case_stmt(op) \ case details::e_sf##op : temp_node = node_allocator_-> \ allocate<details::sf3_node<Type,details::sf##op##_op<Type> > > \ (operation, branch); \ break; \ case_stmt(00) case_stmt(01) case_stmt(02) case_stmt(03) case_stmt(04) case_stmt(05) case_stmt(06) case_stmt(07) case_stmt(08) case_stmt(09) case_stmt(10) case_stmt(11) case_stmt(12) case_stmt(13) case_stmt(14) case_stmt(15) case_stmt(16) case_stmt(17) case_stmt(18) case_stmt(19) case_stmt(20) case_stmt(21) case_stmt(22) case_stmt(23) case_stmt(24) case_stmt(25) case_stmt(26) case_stmt(27) case_stmt(28) case_stmt(29) case_stmt(30) case_stmt(31) case_stmt(32) case_stmt(33) case_stmt(34) case_stmt(35) case_stmt(36) case_stmt(37) case_stmt(38) case_stmt(39) case_stmt(40) case_stmt(41) case_stmt(42) case_stmt(43) case_stmt(44) case_stmt(45) case_stmt(46) case_stmt(47) #undef case_stmt default : return error_node(); } const T v = temp_node->value(); details::free_node(*node_allocator_,temp_node); return node_allocator_->allocate<literal_node_t>(v); } inline expression_node_ptr varnode_optimise_sf3(const details::operator_type& operation, expression_node_ptr (&branch)[3]) { typedef details::variable_node<Type>* variable_ptr; const Type& v0 = static_cast<variable_ptr>(branch[0])->ref(); const Type& v1 = static_cast<variable_ptr>(branch[1])->ref(); const Type& v2 = static_cast<variable_ptr>(branch[2])->ref(); switch (operation) { #define case_stmt(op) \ case details::e_sf##op : return node_allocator_-> \ allocate_rrr<details::sf3_var_node<Type,details::sf##op##_op<Type> > > \ (v0, v1, v2); \ case_stmt(00) case_stmt(01) case_stmt(02) case_stmt(03) case_stmt(04) case_stmt(05) case_stmt(06) case_stmt(07) case_stmt(08) case_stmt(09) case_stmt(10) case_stmt(11) case_stmt(12) case_stmt(13) 
case_stmt(14) case_stmt(15) case_stmt(16) case_stmt(17) case_stmt(18) case_stmt(19) case_stmt(20) case_stmt(21) case_stmt(22) case_stmt(23) case_stmt(24) case_stmt(25) case_stmt(26) case_stmt(27) case_stmt(28) case_stmt(29) case_stmt(30) case_stmt(31) case_stmt(32) case_stmt(33) case_stmt(34) case_stmt(35) case_stmt(36) case_stmt(37) case_stmt(38) case_stmt(39) case_stmt(40) case_stmt(41) case_stmt(42) case_stmt(43) case_stmt(44) case_stmt(45) case_stmt(46) case_stmt(47) #undef case_stmt default : return error_node(); } } inline expression_node_ptr special_function(const details::operator_type& operation, expression_node_ptr (&branch)[3]) { if (!all_nodes_valid(branch)) return error_node(); else if (is_constant_foldable(branch)) return const_optimise_sf3(operation,branch); else if (all_nodes_variables(branch)) return varnode_optimise_sf3(operation,branch); else { switch (operation) { #define case_stmt(op) \ case details::e_sf##op : return node_allocator_-> \ allocate<details::sf3_node<Type,details::sf##op##_op<Type> > > \ (operation, branch); \ case_stmt(00) case_stmt(01) case_stmt(02) case_stmt(03) case_stmt(04) case_stmt(05) case_stmt(06) case_stmt(07) case_stmt(08) case_stmt(09) case_stmt(10) case_stmt(11) case_stmt(12) case_stmt(13) case_stmt(14) case_stmt(15) case_stmt(16) case_stmt(17) case_stmt(18) case_stmt(19) case_stmt(20) case_stmt(21) case_stmt(22) case_stmt(23) case_stmt(24) case_stmt(25) case_stmt(26) case_stmt(27) case_stmt(28) case_stmt(29) case_stmt(30) case_stmt(31) case_stmt(32) case_stmt(33) case_stmt(34) case_stmt(35) case_stmt(36) case_stmt(37) case_stmt(38) case_stmt(39) case_stmt(40) case_stmt(41) case_stmt(42) case_stmt(43) case_stmt(44) case_stmt(45) case_stmt(46) case_stmt(47) #undef case_stmt default : return error_node(); } } } inline expression_node_ptr const_optimise_sf4(const details::operator_type& operation, expression_node_ptr (&branch)[4]) { expression_node_ptr temp_node = error_node(); switch (operation) { #define case_stmt(op) 
\ case details::e_sf##op : temp_node = node_allocator_-> \ allocate<details::sf4_node<Type,details::sf##op##_op<Type> > > \ (operation, branch); \ break; \ case_stmt(48) case_stmt(49) case_stmt(50) case_stmt(51) case_stmt(52) case_stmt(53) case_stmt(54) case_stmt(55) case_stmt(56) case_stmt(57) case_stmt(58) case_stmt(59) case_stmt(60) case_stmt(61) case_stmt(62) case_stmt(63) case_stmt(64) case_stmt(65) case_stmt(66) case_stmt(67) case_stmt(68) case_stmt(69) case_stmt(70) case_stmt(71) case_stmt(72) case_stmt(73) case_stmt(74) case_stmt(75) case_stmt(76) case_stmt(77) case_stmt(78) case_stmt(79) case_stmt(80) case_stmt(81) case_stmt(82) case_stmt(83) case_stmt(84) case_stmt(85) case_stmt(86) case_stmt(87) case_stmt(88) case_stmt(89) case_stmt(90) case_stmt(91) case_stmt(92) case_stmt(93) case_stmt(94) case_stmt(95) case_stmt(96) case_stmt(97) case_stmt(98) case_stmt(99) #undef case_stmt default : return error_node(); } const T v = temp_node->value(); details::free_node(*node_allocator_,temp_node); return node_allocator_->allocate<literal_node_t>(v); } inline expression_node_ptr varnode_optimise_sf4(const details::operator_type& operation, expression_node_ptr (&branch)[4]) { typedef details::variable_node<Type>* variable_ptr; const Type& v0 = static_cast<variable_ptr>(branch[0])->ref(); const Type& v1 = static_cast<variable_ptr>(branch[1])->ref(); const Type& v2 = static_cast<variable_ptr>(branch[2])->ref(); const Type& v3 = static_cast<variable_ptr>(branch[3])->ref(); switch (operation) { #define case_stmt(op) \ case details::e_sf##op : return node_allocator_-> \ allocate_rrrr<details::sf4_var_node<Type,details::sf##op##_op<Type> > > \ (v0, v1, v2, v3); \ case_stmt(48) case_stmt(49) case_stmt(50) case_stmt(51) case_stmt(52) case_stmt(53) case_stmt(54) case_stmt(55) case_stmt(56) case_stmt(57) case_stmt(58) case_stmt(59) case_stmt(60) case_stmt(61) case_stmt(62) case_stmt(63) case_stmt(64) case_stmt(65) case_stmt(66) case_stmt(67) case_stmt(68) case_stmt(69) 
case_stmt(70) case_stmt(71) case_stmt(72) case_stmt(73) case_stmt(74) case_stmt(75) case_stmt(76) case_stmt(77) case_stmt(78) case_stmt(79) case_stmt(80) case_stmt(81) case_stmt(82) case_stmt(83) case_stmt(84) case_stmt(85) case_stmt(86) case_stmt(87) case_stmt(88) case_stmt(89) case_stmt(90) case_stmt(91) case_stmt(92) case_stmt(93) case_stmt(94) case_stmt(95) case_stmt(96) case_stmt(97) case_stmt(98) case_stmt(99) #undef case_stmt default : return error_node(); } } inline expression_node_ptr special_function(const details::operator_type& operation, expression_node_ptr (&branch)[4]) { if (!all_nodes_valid(branch)) return error_node(); else if (is_constant_foldable(branch)) return const_optimise_sf4(operation,branch); else if (all_nodes_variables(branch)) return varnode_optimise_sf4(operation,branch); switch (operation) { #define case_stmt(op) \ case details::e_sf##op : return node_allocator_-> \ allocate<details::sf4_node<Type,details::sf##op##_op<Type> > > \ (operation, branch); \ case_stmt(48) case_stmt(49) case_stmt(50) case_stmt(51) case_stmt(52) case_stmt(53) case_stmt(54) case_stmt(55) case_stmt(56) case_stmt(57) case_stmt(58) case_stmt(59) case_stmt(60) case_stmt(61) case_stmt(62) case_stmt(63) case_stmt(64) case_stmt(65) case_stmt(66) case_stmt(67) case_stmt(68) case_stmt(69) case_stmt(70) case_stmt(71) case_stmt(72) case_stmt(73) case_stmt(74) case_stmt(75) case_stmt(76) case_stmt(77) case_stmt(78) case_stmt(79) case_stmt(80) case_stmt(81) case_stmt(82) case_stmt(83) case_stmt(84) case_stmt(85) case_stmt(86) case_stmt(87) case_stmt(88) case_stmt(89) case_stmt(90) case_stmt(91) case_stmt(92) case_stmt(93) case_stmt(94) case_stmt(95) case_stmt(96) case_stmt(97) case_stmt(98) case_stmt(99) #undef case_stmt default : return error_node(); } } template <typename Allocator, template <typename,typename> class Sequence> inline expression_node_ptr const_optimise_varargfunc(const details::operator_type& operation, Sequence<expression_node_ptr,Allocator>& arg_list) { 
expression_node_ptr temp_node = error_node(); switch (operation) { #define case_stmt(op0,op1) \ case op0 : temp_node = node_allocator_-> \ allocate<details::vararg_node<Type,op1<Type> > > \ (arg_list); \ break; \ case_stmt(details::e_sum , details::vararg_add_op ) case_stmt(details::e_prod , details::vararg_mul_op ) case_stmt(details::e_avg , details::vararg_avg_op ) case_stmt(details::e_min , details::vararg_min_op ) case_stmt(details::e_max , details::vararg_max_op ) case_stmt(details::e_mand , details::vararg_mand_op ) case_stmt(details::e_mor , details::vararg_mor_op ) case_stmt(details::e_multi , details::vararg_multi_op) #undef case_stmt default : return error_node(); } const T v = temp_node->value(); details::free_node(*node_allocator_,temp_node); return node_allocator_->allocate<literal_node_t>(v); } inline bool special_one_parameter_vararg(const details::operator_type& operation) { return ( (details::e_sum == operation) || (details::e_prod == operation) || (details::e_avg == operation) || (details::e_min == operation) || (details::e_max == operation) ); } template <typename Allocator, template <typename,typename> class Sequence> inline expression_node_ptr varnode_optimise_varargfunc(const details::operator_type& operation, Sequence<expression_node_ptr,Allocator>& arg_list) { switch (operation) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate<details::vararg_varnode<Type,op1<Type> > >(arg_list); \ case_stmt(details::e_sum , details::vararg_add_op ) case_stmt(details::e_prod , details::vararg_mul_op ) case_stmt(details::e_avg , details::vararg_avg_op ) case_stmt(details::e_min , details::vararg_min_op ) case_stmt(details::e_max , details::vararg_max_op ) case_stmt(details::e_mand , details::vararg_mand_op ) case_stmt(details::e_mor , details::vararg_mor_op ) case_stmt(details::e_multi , details::vararg_multi_op) #undef case_stmt default : return error_node(); } } template <typename Allocator, template <typename,typename> class 
Sequence> inline expression_node_ptr vectorize_func(const details::operator_type& operation, Sequence<expression_node_ptr,Allocator>& arg_list) { if (1 == arg_list.size()) { switch (operation) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate<details::vectorize_node<Type,op1<Type> > >(arg_list[0]); \ case_stmt(details::e_sum , details::vec_add_op) case_stmt(details::e_prod , details::vec_mul_op) case_stmt(details::e_avg , details::vec_avg_op) case_stmt(details::e_min , details::vec_min_op) case_stmt(details::e_max , details::vec_max_op) #undef case_stmt default : return error_node(); } } else return error_node(); } template <typename Allocator, template <typename,typename> class Sequence> inline expression_node_ptr vararg_function(const details::operator_type& operation, Sequence<expression_node_ptr,Allocator>& arg_list) { if (!all_nodes_valid(arg_list)) { details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } else if (is_constant_foldable(arg_list)) return const_optimise_varargfunc(operation,arg_list); else if ((arg_list.size() == 1) && details::is_ivector_node(arg_list[0])) return vectorize_func(operation,arg_list); else if ((arg_list.size() == 1) && special_one_parameter_vararg(operation)) return arg_list[0]; else if (all_nodes_variables(arg_list)) return varnode_optimise_varargfunc(operation,arg_list); #ifndef exprtk_disable_string_capabilities if (details::e_smulti == operation) { return node_allocator_-> allocate<details::str_vararg_node<Type,details::vararg_multi_op<Type> > >(arg_list); } else #endif { switch (operation) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate<details::vararg_node<Type,op1<Type> > >(arg_list); \ case_stmt(details::e_sum , details::vararg_add_op ) case_stmt(details::e_prod , details::vararg_mul_op ) case_stmt(details::e_avg , details::vararg_avg_op ) case_stmt(details::e_min , details::vararg_min_op ) case_stmt(details::e_max , details::vararg_max_op ) 
case_stmt(details::e_mand , details::vararg_mand_op ) case_stmt(details::e_mor , details::vararg_mor_op ) case_stmt(details::e_multi , details::vararg_multi_op) #undef case_stmt default : return error_node(); } } } template <std::size_t N> inline expression_node_ptr function(ifunction_t* f, expression_node_ptr (&b)[N]) { typedef typename details::function_N_node<T,ifunction_t,N> function_N_node_t; expression_node_ptr result = synthesize_expression<function_N_node_t,N>(f,b); if (0 == result) return error_node(); else { // Can the function call be completely optimised? if (details::is_constant_node(result)) return result; else if (!all_nodes_valid(b)) return error_node(); else if (N != f->param_count) { details::free_all_nodes(*node_allocator_,b); return error_node(); } function_N_node_t* func_node_ptr = static_cast<function_N_node_t*>(result); if (func_node_ptr->init_branches(b)) return result; else { details::free_all_nodes(*node_allocator_,b); return error_node(); } } } inline expression_node_ptr function(ifunction_t* f) { typedef typename details::function_N_node<Type,ifunction_t,0> function_N_node_t; return node_allocator_->allocate<function_N_node_t>(f); } inline expression_node_ptr vararg_function_call(ivararg_function_t* vaf, std::vector<expression_node_ptr>& arg_list) { if (!all_nodes_valid(arg_list)) { details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } typedef details::vararg_function_node<Type,ivararg_function_t> alloc_type; expression_node_ptr result = node_allocator_->allocate<alloc_type>(vaf,arg_list); if ( !arg_list.empty() && !vaf->has_side_effects() && is_constant_foldable(arg_list) ) { const Type v = result->value(); details::free_node(*node_allocator_,result); result = node_allocator_->allocate<literal_node_t>(v); } parser_->state_.activate_side_effect("vararg_function_call()"); return result; } inline expression_node_ptr generic_function_call(igeneric_function_t* gf, std::vector<expression_node_ptr>& arg_list, const 
std::size_t& param_seq_index = std::numeric_limits<std::size_t>::max()) { if (!all_nodes_valid(arg_list)) { details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } typedef details::generic_function_node <Type,igeneric_function_t> alloc_type1; typedef details::multimode_genfunction_node<Type,igeneric_function_t> alloc_type2; const std::size_t no_psi = std::numeric_limits<std::size_t>::max(); expression_node_ptr result = error_node(); if (no_psi == param_seq_index) result = node_allocator_->allocate<alloc_type1>(arg_list,gf); else result = node_allocator_->allocate<alloc_type2>(gf, param_seq_index, arg_list); alloc_type1* genfunc_node_ptr = static_cast<alloc_type1*>(result); if ( !arg_list.empty() && !gf->has_side_effects() && parser_->state_.type_check_enabled && is_constant_foldable(arg_list) ) { genfunc_node_ptr->init_branches(); const Type v = result->value(); details::free_node(*node_allocator_,result); return node_allocator_->allocate<literal_node_t>(v); } else if (genfunc_node_ptr->init_branches()) { parser_->state_.activate_side_effect("generic_function_call()"); return result; } else { details::free_node(*node_allocator_, result); details::free_all_nodes(*node_allocator_, arg_list); return error_node(); } } #ifndef exprtk_disable_string_capabilities inline expression_node_ptr string_function_call(igeneric_function_t* gf, std::vector<expression_node_ptr>& arg_list, const std::size_t& param_seq_index = std::numeric_limits<std::size_t>::max()) { if (!all_nodes_valid(arg_list)) { details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } typedef details::string_function_node <Type,igeneric_function_t> alloc_type1; typedef details::multimode_strfunction_node<Type,igeneric_function_t> alloc_type2; const std::size_t no_psi = std::numeric_limits<std::size_t>::max(); expression_node_ptr result = error_node(); if (no_psi == param_seq_index) result = node_allocator_->allocate<alloc_type1>(gf,arg_list); else result = 
node_allocator_->allocate<alloc_type2>(gf, param_seq_index, arg_list); alloc_type1* strfunc_node_ptr = static_cast<alloc_type1*>(result); if ( !arg_list.empty() && !gf->has_side_effects() && is_constant_foldable(arg_list) ) { strfunc_node_ptr->init_branches(); const Type v = result->value(); details::free_node(*node_allocator_,result); return node_allocator_->allocate<literal_node_t>(v); } else if (strfunc_node_ptr->init_branches()) { parser_->state_.activate_side_effect("string_function_call()"); return result; } else { details::free_node (*node_allocator_,result ); details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } } #endif #ifndef exprtk_disable_return_statement inline expression_node_ptr return_call(std::vector<expression_node_ptr>& arg_list) { if (!all_nodes_valid(arg_list)) { details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } typedef details::return_node<Type> alloc_type; expression_node_ptr result = node_allocator_-> allocate_rr<alloc_type>(arg_list,parser_->results_ctx()); alloc_type* return_node_ptr = static_cast<alloc_type*>(result); if (return_node_ptr->init_branches()) { parser_->state_.activate_side_effect("return_call()"); return result; } else { details::free_node (*node_allocator_,result ); details::free_all_nodes(*node_allocator_,arg_list); return error_node(); } } inline expression_node_ptr return_envelope(expression_node_ptr body, results_context_t* rc, bool*& return_invoked) { typedef details::return_envelope_node<Type> alloc_type; expression_node_ptr result = node_allocator_-> allocate_cr<alloc_type>(body,(*rc)); return_invoked = static_cast<alloc_type*>(result)->retinvk_ptr(); return result; } #else inline expression_node_ptr return_call(std::vector<expression_node_ptr>&) { return error_node(); } inline expression_node_ptr return_envelope(expression_node_ptr, results_context_t*, bool*&) { return error_node(); } #endif inline expression_node_ptr vector_element(const std::string& symbol, 
vector_holder_ptr vector_base, expression_node_ptr index) { expression_node_ptr result = error_node(); if (details::is_constant_node(index)) { std::size_t i = static_cast<std::size_t>(details::numeric::to_int64(index->value())); details::free_node(*node_allocator_,index); if (vector_base->rebaseable()) { return node_allocator_->allocate<rebasevector_celem_node_t>(i,vector_base); } scope_element& se = parser_->sem_.get_element(symbol,i); if (se.index == i) { result = se.var_node; } else { scope_element nse; nse.name = symbol; nse.active = true; nse.ref_count = 1; nse.type = scope_element::e_vecelem; nse.index = i; nse.depth = parser_->state_.scope_depth; nse.data = 0; nse.var_node = node_allocator_->allocate<variable_node_t>((*(*vector_base)[i])); if (!parser_->sem_.add_element(nse)) { parser_->set_synthesis_error("Failed to add new local vector element to SEM [1]"); parser_->sem_.free_element(nse); result = error_node(); } exprtk_debug(("vector_element() - INFO - Added new local vector element: %s\n",nse.name.c_str())); parser_->state_.activate_side_effect("vector_element()"); result = nse.var_node; } } else if (vector_base->rebaseable()) result = node_allocator_->allocate<rebasevector_elem_node_t>(index,vector_base); else result = node_allocator_->allocate<vector_elem_node_t>(index,vector_base); return result; } private: template <std::size_t N, typename NodePtr> inline bool is_constant_foldable(NodePtr (&b)[N]) const { for (std::size_t i = 0; i < N; ++i) { if (0 == b[i]) return false; else if (!details::is_constant_node(b[i])) return false; } return true; } template <typename NodePtr, typename Allocator, template <typename,typename> class Sequence> inline bool is_constant_foldable(const Sequence<NodePtr,Allocator>& b) const { for (std::size_t i = 0; i < b.size(); ++i) { if (0 == b[i]) return false; else if (!details::is_constant_node(b[i])) return false; } return true; } void lodge_assignment(symbol_type cst, expression_node_ptr node) { 
// lodge_assignment (continued): flag the expression as having a side
// effect and, when the dependent-entity collector is gathering
// assignments, resolve the name of the assigned-to symbol and record it.
// Assignments to a vector element are recorded against the owning vector.
parser_->state_.activate_side_effect("lodge_assignment()");

if (!parser_->dec_.collect_assignments())
   return;

std::string symbol_name;

switch (cst)
{
   case e_st_variable : symbol_name = parser_->symtab_store_
                                         .get_variable_name(node);
                        break;

   #ifndef exprtk_disable_string_capabilities
   case e_st_string   : symbol_name = parser_->symtab_store_
                                         .get_stringvar_name(node);
                        break;
   #endif

   case e_st_vector   : {
                           typedef details::vector_holder<T> vector_holder_t;

                           vector_holder_t& vh = static_cast<vector_node_t*>(node)->vec_holder();

                           symbol_name = parser_->symtab_store_.get_vector_name(&vh);
                        }
                        break;

   case e_st_vecelem  : {
                           typedef details::vector_holder<T> vector_holder_t;

                           vector_holder_t& vh = static_cast<vector_elem_node_t*>(node)->vec_holder();

                           symbol_name = parser_->symtab_store_.get_vector_name(&vh);

                           // An element assignment is logged against the whole vector.
                           cst = e_st_vector;
                        }
                        break;

   default            : return;
}

if (!symbol_name.empty())
{
   parser_->dec_.add_assignment(symbol_name,cst);
}
}

// Synthesise a plain assignment (lhs := rhs). The concrete assignment
// node type is selected from the kind of the left-hand side: scalar
// variable, vector element, rebase-vector (const) element, string,
// string range or whole vector. Anything else is a synthesis error.
inline expression_node_ptr synthesize_assignment_expression(const details::operator_type& operation, expression_node_ptr (&branch)[2])
{
   if (details::is_variable_node(branch[0]))
   {
      lodge_assignment(e_st_variable,branch[0]);
      return synthesize_expression<assignment_node_t,2>(operation,branch);
   }
   else if (details::is_vector_elem_node(branch[0]))
   {
      lodge_assignment(e_st_vecelem,branch[0]);
      return synthesize_expression<assignment_vec_elem_node_t, 2>(operation, branch);
   }
   else if (details::is_rebasevector_elem_node(branch[0]))
   {
      lodge_assignment(e_st_vecelem,branch[0]);
      return synthesize_expression<assignment_rebasevec_elem_node_t, 2>(operation, branch);
   }
   else if (details::is_rebasevector_celem_node(branch[0]))
   {
      lodge_assignment(e_st_vecelem,branch[0]);
      return synthesize_expression<assignment_rebasevec_celem_node_t, 2>(operation, branch);
   }
   #ifndef exprtk_disable_string_capabilities
   else if (details::is_string_node(branch[0]))
   {
      lodge_assignment(e_st_string,branch[0]);
      return synthesize_expression<assignment_string_node_t,2>(operation, branch);
   }
   else if (details::is_string_range_node(branch[0]))
   {
      lodge_assignment(e_st_string,branch[0]);
      return synthesize_expression<assignment_string_range_node_t,2>(operation, branch);
   }
   #endif
   else if (details::is_vector_node(branch[0]))
   {
      lodge_assignment(e_st_vector,branch[0]);

      // vector := vector copies element-wise; otherwise the scalar rhs
      // is assigned across every element of the lhs vector.
      if (details::is_ivector_node(branch[1]))
         return synthesize_expression<assignment_vecvec_node_t,2>(operation, branch);
      else
         return synthesize_expression<assignment_vec_node_t,2>(operation, branch);
   }
   else
   {
      parser_->set_synthesis_error("Invalid assignment operation.[1]");

      return error_node();
   }
}

// Synthesise a compound assignment (+=, -=, *=, /=, %=). As above, the
// node type is selected from the kind of the left-hand side; a string
// lhs only supports += (concatenation-assignment).
inline expression_node_ptr synthesize_assignment_operation_expression(const details::operator_type& operation, expression_node_ptr (&branch)[2])
{
   if (details::is_variable_node(branch[0]))
   {
      lodge_assignment(e_st_variable,branch[0]);

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                  \
         case op0 : return node_allocator_->                                                         \
                       template allocate_rrr<typename details::assignment_op_node<Type,op1<Type> > > \
                          (operation, branch[0], branch[1]);                                         \

         case_stmt(details::e_addass,details::add_op)
         case_stmt(details::e_subass,details::sub_op)
         case_stmt(details::e_mulass,details::mul_op)
         case_stmt(details::e_divass,details::div_op)
         case_stmt(details::e_modass,details::mod_op)
         #undef case_stmt
         default : return error_node();
      }
   }
   else if (details::is_vector_elem_node(branch[0]))
   {
      lodge_assignment(e_st_vecelem,branch[0]);

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                           \
         case op0 : return node_allocator_->                                                                  \
                       template allocate_rrr<typename details::assignment_vec_elem_op_node<Type,op1<Type> > > \
                          (operation, branch[0], branch[1]);                                                  \

         case_stmt(details::e_addass,details::add_op)
         case_stmt(details::e_subass,details::sub_op)
         case_stmt(details::e_mulass,details::mul_op)
         case_stmt(details::e_divass,details::div_op)
         case_stmt(details::e_modass,details::mod_op)
         #undef case_stmt
         default : return error_node();
      }
   }
   else if (details::is_rebasevector_elem_node(branch[0]))
   {
      lodge_assignment(e_st_vecelem,branch[0]);

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                                 \
         case op0 : return node_allocator_->                                                                        \
                       template allocate_rrr<typename details::assignment_rebasevec_elem_op_node<Type,op1<Type> > > \
                          (operation, branch[0], branch[1]);                                                        \

         case_stmt(details::e_addass,details::add_op)
         case_stmt(details::e_subass,details::sub_op)
         case_stmt(details::e_mulass,details::mul_op)
         case_stmt(details::e_divass,details::div_op)
         case_stmt(details::e_modass,details::mod_op)
         #undef case_stmt
         default : return error_node();
      }
   }
   else if (details::is_rebasevector_celem_node(branch[0]))
   {
      lodge_assignment(e_st_vecelem,branch[0]);

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                                  \
         case op0 : return node_allocator_->                                                                         \
                       template allocate_rrr<typename details::assignment_rebasevec_celem_op_node<Type,op1<Type> > > \
                          (operation, branch[0], branch[1]);                                                         \

         case_stmt(details::e_addass,details::add_op)
         case_stmt(details::e_subass,details::sub_op)
         case_stmt(details::e_mulass,details::mul_op)
         case_stmt(details::e_divass,details::div_op)
         case_stmt(details::e_modass,details::mod_op)
         #undef case_stmt
         default : return error_node();
      }
   }
   else if (details::is_vector_node(branch[0]))
   {
      lodge_assignment(e_st_vector,branch[0]);

      // vector op= vector is element-wise; vector op= scalar broadcasts
      // the scalar across the vector.
      if (details::is_ivector_node(branch[1]))
      {
         switch (operation)
         {
            #define case_stmt(op0,op1)                                                                         \
            case op0 : return node_allocator_->                                                                \
                          template allocate_rrr<typename details::assignment_vecvec_op_node<Type,op1<Type> > > \
                             (operation, branch[0], branch[1]);                                                \

            case_stmt(details::e_addass,details::add_op)
            case_stmt(details::e_subass,details::sub_op)
            case_stmt(details::e_mulass,details::mul_op)
            case_stmt(details::e_divass,details::div_op)
            case_stmt(details::e_modass,details::mod_op)
            #undef case_stmt
            default : return error_node();
         }
      }
      else
      {
         switch (operation)
         {
            #define case_stmt(op0,op1)                                                                      \
            case op0 : return node_allocator_->                                                             \
                          template allocate_rrr<typename details::assignment_vec_op_node<Type,op1<Type> > > \
                             (operation, branch[0], branch[1]);                                             \

            case_stmt(details::e_addass,details::add_op)
            case_stmt(details::e_subass,details::sub_op)
case_stmt(details::e_mulass,details::mul_op)
            case_stmt(details::e_divass,details::div_op)
            case_stmt(details::e_modass,details::mod_op)
            #undef case_stmt
            default : return error_node();
         }
      }
   }
   #ifndef exprtk_disable_string_capabilities
   // string += string is the only compound assignment supported for strings.
   else if (
             (details::e_addass == operation) &&
             details::is_string_node(branch[0])
           )
   {
      typedef details::assignment_string_node<T,details::asn_addassignment> addass_t;

      lodge_assignment(e_st_string,branch[0]);

      return synthesize_expression<addass_t,2>(operation,branch);
   }
   #endif
   else
   {
      parser_->set_synthesis_error("Invalid assignment operation[2]");

      return error_node();
   }
}

// Synthesise element-wise equality/inequality/logical operations where
// at least one operand is a vector. A scalar operand is broadcast via
// the vec-op-val / val-op-vec node forms.
inline expression_node_ptr synthesize_veceqineqlogic_operation_expression(const details::operator_type& operation, expression_node_ptr (&branch)[2])
{
   const bool is_b0_ivec = details::is_ivector_node(branch[0]);
   const bool is_b1_ivec = details::is_ivector_node(branch[1]);

   #define batch_eqineq_logic_case                  \
   case_stmt(details::   e_lt, details::   lt_op)   \
   case_stmt(details::  e_lte, details::  lte_op)   \
   case_stmt(details::   e_gt, details::   gt_op)   \
   case_stmt(details::  e_gte, details::  gte_op)   \
   case_stmt(details::   e_eq, details::   eq_op)   \
   case_stmt(details::   e_ne, details::   ne_op)   \
   case_stmt(details::e_equal, details::equal_op)   \
   case_stmt(details::  e_and, details::  and_op)   \
   case_stmt(details:: e_nand, details:: nand_op)   \
   case_stmt(details::   e_or, details::   or_op)   \
   case_stmt(details::  e_nor, details::  nor_op)   \
   case_stmt(details::  e_xor, details::  xor_op)   \
   case_stmt(details:: e_xnor, details:: xnor_op)   \

   if (is_b0_ivec && is_b1_ivec)
   {
      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                      \
         case op0 : return node_allocator_->                                                             \
                       template allocate_rrr<typename details::vec_binop_vecvec_node<Type,op1<Type> > >  \
                          (operation, branch[0], branch[1]);                                             \

         batch_eqineq_logic_case
         #undef case_stmt
         default : return error_node();
      }
   }
   else if (is_b0_ivec && !is_b1_ivec)
   {
      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                      \
         case op0 : return node_allocator_->                                                             \
                       template allocate_rrr<typename details::vec_binop_vecval_node<Type,op1<Type> > >  \
                          (operation, branch[0], branch[1]);                                             \

         batch_eqineq_logic_case
         #undef case_stmt
         default : return error_node();
      }
   }
   else if (!is_b0_ivec && is_b1_ivec)
   {
      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                      \
         case op0 : return node_allocator_->                                                             \
                       template allocate_rrr<typename details::vec_binop_valvec_node<Type,op1<Type> > >  \
                          (operation, branch[0], branch[1]);                                             \

         batch_eqineq_logic_case
         #undef case_stmt
         default : return error_node();
      }
   }
   else
      return error_node();

   #undef batch_eqineq_logic_case
}

// Synthesise element-wise arithmetic (+, -, *, /, %) where at least one
// operand is a vector. The vec-op-vec and vec-op-scalar forms also
// accept pow.
// NOTE(review): the scalar-op-vector branch below does not include the
// e_pow case that the other two branches have — this matches the
// vendored source; confirm against upstream exprtk before changing.
inline expression_node_ptr synthesize_vecarithmetic_operation_expression(const details::operator_type& operation, expression_node_ptr (&branch)[2])
{
   const bool is_b0_ivec = details::is_ivector_node(branch[0]);
   const bool is_b1_ivec = details::is_ivector_node(branch[1]);

   #define vector_ops                          \
   case_stmt(details::e_add,details::add_op)   \
   case_stmt(details::e_sub,details::sub_op)   \
   case_stmt(details::e_mul,details::mul_op)   \
   case_stmt(details::e_div,details::div_op)   \
   case_stmt(details::e_mod,details::mod_op)   \

   if (is_b0_ivec && is_b1_ivec)
   {
      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                      \
         case op0 : return node_allocator_->                                                             \
                       template allocate_rrr<typename details::vec_binop_vecvec_node<Type,op1<Type> > >  \
                          (operation, branch[0], branch[1]);                                             \

         vector_ops
         case_stmt(details::e_pow,details:: pow_op)
         #undef case_stmt
         default : return error_node();
      }
   }
   else if (is_b0_ivec && !is_b1_ivec)
   {
      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                      \
         case op0 : return node_allocator_->                                                             \
                       template allocate_rrr<typename details::vec_binop_vecval_node<Type,op1<Type> > >  \
                          (operation, branch[0], branch[1]);                                             \

         vector_ops
         case_stmt(details::e_pow,details:: pow_op)
         #undef case_stmt
         default : return error_node();
      }
   }
   else if (!is_b0_ivec && is_b1_ivec)
   {
      switch (operation)
      {
         #define case_stmt(op0,op1)                                                                      \
         case op0 : return node_allocator_->                                                             \
                       template allocate_rrr<typename details::vec_binop_valvec_node<Type,op1<Type> > >  \
                          (operation, branch[0], branch[1]);                                             \

         vector_ops
         #undef case_stmt
         default : return error_node();
      }
   }
   else
      return error_node();

   #undef vector_ops
}

// Synthesise a swap of two l-values: scalar variables (specialised node
// when both are plain variable nodes), vectors, or strings. Swapping is
// always registered as a side effect.
inline expression_node_ptr synthesize_swap_expression(expression_node_ptr (&branch)[2])
{
   const bool v0_is_ivar = details::is_ivariable_node(branch[0]);
   const bool v1_is_ivar = details::is_ivariable_node(branch[1]);

   const bool v0_is_ivec = details::is_ivector_node (branch[0]);
   const bool v1_is_ivec = details::is_ivector_node (branch[1]);

   #ifndef exprtk_disable_string_capabilities
   const bool v0_is_str = details::is_generally_string_node(branch[0]);
   const bool v1_is_str = details::is_generally_string_node(branch[1]);
   #endif

   expression_node_ptr result = error_node();

   if (v0_is_ivar && v1_is_ivar)
   {
      typedef details::variable_node<T>* variable_node_ptr;

      variable_node_ptr v0 = variable_node_ptr(0);
      variable_node_ptr v1 = variable_node_ptr(0);

      // Plain variable nodes get the specialised swap_node; any other
      // ivariable pairing falls back to the generic form.
      if (
           (0 != (v0 = dynamic_cast<variable_node_ptr>(branch[0]))) &&
           (0 != (v1 = dynamic_cast<variable_node_ptr>(branch[1])))
         )
      {
         result = node_allocator_->allocate<details::swap_node<T> >(v0,v1);
      }
      else
         result = node_allocator_->allocate<details::swap_generic_node<T> >(branch[0],branch[1]);
   }
   else if (v0_is_ivec && v1_is_ivec)
   {
      result = node_allocator_->allocate<details::swap_vecvec_node<T> >(branch[0],branch[1]);
   }
   #ifndef exprtk_disable_string_capabilities
   else if (v0_is_str && v1_is_str)
   {
      if (is_string_node(branch[0]) && is_string_node(branch[1]))
         result = node_allocator_->allocate<details::swap_string_node<T> >
                     (branch[0], branch[1]);
      else
         result = node_allocator_->allocate<details::swap_genstrings_node<T> >
                     (branch[0], branch[1]);
   }
   #endif
   else
   {
      parser_->set_synthesis_error("Only variables, strings, vectors or vector elements can be swapped");

      return error_node();
   }

   parser_->state_.activate_side_effect("synthesize_swap_expression()");

   return result;
}

#ifndef exprtk_disable_sc_andor
// Synthesise short-circuited logical and/or. When either operand is a
// constant that fully determines the result, the whole expression is
// folded to a literal 0/1 and both branches are freed.
inline expression_node_ptr synthesize_shortcircuit_expression(const details::operator_type& operation, expression_node_ptr
(&branch)[2])
{
   expression_node_ptr result = error_node();

   // A constant-false lhs of 'and', or constant-true lhs of 'or',
   // decides the result outright.
   if (details::is_constant_node(branch[0]))
   {
      if (
           (details::e_scand == operation) &&
           std::equal_to<T>()(T(0),branch[0]->value())
         )
         result = node_allocator_->allocate_c<literal_node_t>(T(0));
      else if (
                (details::e_scor == operation) &&
                std::not_equal_to<T>()(T(0),branch[0]->value())
              )
         result = node_allocator_->allocate_c<literal_node_t>(T(1));
   }

   // Likewise for a deciding constant rhs (only if not already decided).
   if (details::is_constant_node(branch[1]) && (0 == result))
   {
      if (
           (details::e_scand == operation) &&
           std::equal_to<T>()(T(0),branch[1]->value())
         )
         result = node_allocator_->allocate_c<literal_node_t>(T(0));
      else if (
                (details::e_scor == operation) &&
                std::not_equal_to<T>()(T(0),branch[1]->value())
              )
         result = node_allocator_->allocate_c<literal_node_t>(T(1));
   }

   if (result)
   {
      // Folded to a literal: both operand sub-trees are now unused.
      free_node(*node_allocator_, branch[0]);
      free_node(*node_allocator_, branch[1]);

      return result;
   }
   else if (details::e_scand == operation)
   {
      return synthesize_expression<scand_node_t,2>(operation, branch);
   }
   else if (details::e_scor == operation)
   {
      return synthesize_expression<scor_node_t,2>(operation, branch);
   }
   else
      return error_node();
}
#else
// Short-circuit and/or support compiled out.
inline expression_node_ptr synthesize_shortcircuit_expression(const details::operator_type&, expression_node_ptr (&)[2])
{
   return error_node();
}
#endif

// Shared case tables used by the binary synthesis routines below. Each
// expands to a run of case_stmt(...) invocations; case_stmt itself is
// (re)defined at every point of use.
#define basic_opr_switch_statements          \
case_stmt(details::e_add, details::add_op)   \
case_stmt(details::e_sub, details::sub_op)   \
case_stmt(details::e_mul, details::mul_op)   \
case_stmt(details::e_div, details::div_op)   \
case_stmt(details::e_mod, details::mod_op)   \
case_stmt(details::e_pow, details::pow_op)   \

#define extended_opr_switch_statements         \
case_stmt(details::  e_lt, details::  lt_op)   \
case_stmt(details:: e_lte, details:: lte_op)   \
case_stmt(details::  e_gt, details::  gt_op)   \
case_stmt(details:: e_gte, details:: gte_op)   \
case_stmt(details::  e_eq, details::  eq_op)   \
case_stmt(details::  e_ne, details::  ne_op)   \
case_stmt(details:: e_and, details:: and_op)   \
case_stmt(details::e_nand, details::nand_op)   \
case_stmt(details::  e_or, details::  or_op)   \
case_stmt(details:: e_nor, details:: nor_op)   \
case_stmt(details:: e_xor, details:: xor_op)   \
case_stmt(details::e_xnor, details::xnor_op)   \

#ifndef exprtk_disable_cardinal_pow_optimisation
// Synthesise v^p for integral p in [1,60] using fast_exp
// multiplication chains. IPowNode selects the plain (ipow) or
// reciprocal (ipowinv) node family.
template <typename TType, template <typename,typename> class IPowNode>
inline expression_node_ptr cardinal_pow_optimisation_impl(const TType& v, const unsigned int& p)
{
   switch (p)
   {
      #define case_stmt(cp)                                                     \
      case cp : return node_allocator_->                                        \
                   allocate<IPowNode<T,details::numeric::fast_exp<T,cp> > >(v); \

      case_stmt( 1) case_stmt( 2) case_stmt( 3) case_stmt( 4)
      case_stmt( 5) case_stmt( 6) case_stmt( 7) case_stmt( 8)
      case_stmt( 9) case_stmt(10) case_stmt(11) case_stmt(12)
      case_stmt(13) case_stmt(14) case_stmt(15) case_stmt(16)
      case_stmt(17) case_stmt(18) case_stmt(19) case_stmt(20)
      case_stmt(21) case_stmt(22) case_stmt(23) case_stmt(24)
      case_stmt(25) case_stmt(26) case_stmt(27) case_stmt(28)
      case_stmt(29) case_stmt(30) case_stmt(31) case_stmt(32)
      case_stmt(33) case_stmt(34) case_stmt(35) case_stmt(36)
      case_stmt(37) case_stmt(38) case_stmt(39) case_stmt(40)
      case_stmt(41) case_stmt(42) case_stmt(43) case_stmt(44)
      case_stmt(45) case_stmt(46) case_stmt(47) case_stmt(48)
      case_stmt(49) case_stmt(50) case_stmt(51) case_stmt(52)
      case_stmt(53) case_stmt(54) case_stmt(55) case_stmt(56)
      case_stmt(57) case_stmt(58) case_stmt(59) case_stmt(60)
      #undef case_stmt
      default : return error_node();
   }
}

// Optimise v ^ c for a constant integral c with |c| <= 60:
// c == 0 folds to literal 1, c == 2 becomes v*v, otherwise an
// ipow/ipowinv node is produced (negative c selects the reciprocal form).
inline expression_node_ptr cardinal_pow_optimisation(const T& v, const T& c)
{
   const bool not_recipricol = (c >= T(0));
   const unsigned int p = static_cast<unsigned int>(details::numeric::to_int32(details::numeric::abs(c)));

   if (0 == p)
      return node_allocator_->allocate_c<literal_node_t>(T(1));
   else if (std::equal_to<T>()(T(2),c))
   {
      return node_allocator_->
         template allocate_rr<typename details::vov_node<Type,details::mul_op<Type> > >(v,v);
   }
   else
   {
      if (not_recipricol)
         return cardinal_pow_optimisation_impl<T,details::ipow_node>(v,p);
      else
         return cardinal_pow_optimisation_impl<T,details::ipowinv_node>(v,p);
   }
}

// True when 'operation' is pow and the constant exponent is an integer
// with magnitude <= 60 — the range covered by the optimisation above.
inline bool cardinal_pow_optimisable(const details::operator_type& operation, const T& c)
{
   return (details::e_pow == operation) &&
          (details::numeric::abs(c) <= T(60)) &&
          details::numeric::is_integer(c);
}

// branch[0] ^ branch[1] where branch[1] is a literal: the literal is
// consumed and a bipow/bipowninv chain is synthesised; an exponent of 0
// frees both branches and yields literal 1.
inline expression_node_ptr cardinal_pow_optimisation(expression_node_ptr (&branch)[2])
{
   const Type c = static_cast<details::literal_node<Type>*>(branch[1])->value();
   const bool not_recipricol = (c >= T(0));
   const unsigned int p = static_cast<unsigned int>(details::numeric::to_int32(details::numeric::abs(c)));

   node_allocator_->free(branch[1]);

   if (0 == p)
   {
      details::free_all_nodes(*node_allocator_, branch);

      return node_allocator_->allocate_c<literal_node_t>(T(1));
   }
   else if (not_recipricol)
      return cardinal_pow_optimisation_impl<expression_node_ptr,details::bipow_node>(branch[0],p);
   else
      return cardinal_pow_optimisation_impl<expression_node_ptr,details::bipowninv_node>(branch[0],p);
}
#else
// Cardinal pow optimisation compiled out: stubs that never optimise.
inline expression_node_ptr cardinal_pow_optimisation(T&, const T&)
{
   return error_node();
}

inline bool cardinal_pow_optimisable(const details::operator_type&, const T&)
{
   return false;
}

inline expression_node_ptr cardinal_pow_optimisation(expression_node_ptr(&)[2])
{
   return error_node();
}
#endif

// Synthesise a binary operation over two non-trivial sub-expressions,
// hoisting unary negation out of either operand where algebraically
// valid (e.g. -a + -b --> -(a + b), -a + b --> b - a) before falling
// back to a plain binary_ext_node.
struct synthesize_binary_ext_expression
{
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      const bool left_neg  = is_neg_unary_node(branch[0]);
      const bool right_neg = is_neg_unary_node(branch[1]);

      if (left_neg && right_neg)
      {
         if (
              (details::e_add == operation) ||
              (details::e_sub == operation) ||
              (details::e_mul == operation) ||
              (details::e_div == operation)
            )
         {
            // Strip the negation wrappers from both operands; on failure
            // everything is released and synthesis aborts.
            if (
                 !expr_gen.parser_->simplify_unary_negation_branch(branch[0]) ||
                 !expr_gen.parser_->simplify_unary_negation_branch(branch[1])
               )
            {
               details::free_all_nodes(*expr_gen.node_allocator_,branch);

               return error_node();
            }
         }

         switch (operation)
         {
                                    // -f(x + 1) + -g(y + 1) --> -(f(x + 1) + g(y + 1))
            case details::e_add : return expr_gen(details::e_neg,
                                     expr_gen.node_allocator_->
                                        template allocate<typename details::binary_ext_node<Type,details::add_op<Type> > >
                                           (branch[0],branch[1]));

                                    // -f(x + 1) - -g(y + 1) --> g(y + 1) - f(x + 1)
            case details::e_sub : return expr_gen.node_allocator_->
                                     template allocate<typename details::binary_ext_node<Type,details::sub_op<Type> > >
                                        (branch[1],branch[0]);

            default             : break;
         }
      }
      else if (left_neg && !right_neg)
      {
         if (
              (details::e_add == operation) ||
              (details::e_sub == operation) ||
              (details::e_mul == operation) ||
              (details::e_div == operation)
            )
         {
            if (!expr_gen.parser_->simplify_unary_negation_branch(branch[0]))
            {
               details::free_all_nodes(*expr_gen.node_allocator_,branch);

               return error_node();
            }

            switch (operation)
            {
                                       // -f(x + 1) + g(y + 1) --> g(y + 1) - f(x + 1)
               case details::e_add : return expr_gen.node_allocator_->
                                        template allocate<typename details::binary_ext_node<Type,details::sub_op<Type> > >
                                           (branch[1], branch[0]);

                                       // -f(x + 1) - g(y + 1) --> -(f(x + 1) + g(y + 1))
               case details::e_sub : return expr_gen(details::e_neg,
                                        expr_gen.node_allocator_->
                                           template allocate<typename details::binary_ext_node<Type,details::add_op<Type> > >
                                              (branch[0], branch[1]));

                                       // -f(x + 1) * g(y + 1) --> -(f(x + 1) * g(y + 1))
               case details::e_mul : return expr_gen(details::e_neg,
                                        expr_gen.node_allocator_->
                                           template allocate<typename details::binary_ext_node<Type,details::mul_op<Type> > >
                                              (branch[0], branch[1]));

                                       // -f(x + 1) / g(y + 1) --> -(f(x + 1) / g(y + 1))
               case details::e_div : return expr_gen(details::e_neg,
                                        expr_gen.node_allocator_->
                                           template allocate<typename details::binary_ext_node<Type,details::div_op<Type> > >
                                              (branch[0], branch[1]));

               default             : return error_node();
            }
         }
      }
      else if (!left_neg && right_neg)
      {
         if (
              (details::e_add == operation) ||
              (details::e_sub == operation) ||
              (details::e_mul == operation) ||
              (details::e_div == operation)
            )
         {
            if
(!expr_gen.parser_->simplify_unary_negation_branch(branch[1]))
            {
               details::free_all_nodes(*expr_gen.node_allocator_,branch);

               return error_node();
            }

            switch (operation)
            {
                                       // f(x + 1) + -g(y + 1) --> f(x + 1) - g(y + 1)
               case details::e_add : return expr_gen.node_allocator_->
                                        template allocate<typename details::binary_ext_node<Type,details::sub_op<Type> > >
                                           (branch[0], branch[1]);

                                       // f(x + 1) - - g(y + 1) --> f(x + 1) + g(y + 1)
               case details::e_sub : return expr_gen.node_allocator_->
                                        template allocate<typename details::binary_ext_node<Type,details::add_op<Type> > >
                                           (branch[0], branch[1]);

                                       // f(x + 1) * -g(y + 1) --> -(f(x + 1) * g(y + 1))
               case details::e_mul : return expr_gen(details::e_neg,
                                        expr_gen.node_allocator_->
                                           template allocate<typename details::binary_ext_node<Type,details::mul_op<Type> > >
                                              (branch[0], branch[1]));

                                       // f(x + 1) / -g(y + 1) --> -(f(x + 1) / g(y + 1))
               case details::e_div : return expr_gen(details::e_neg,
                                        expr_gen.node_allocator_->
                                           template allocate<typename details::binary_ext_node<Type,details::div_op<Type> > >
                                              (branch[0], branch[1]));

               default             : return error_node();
            }
         }
      }

      // No negation folding applied: plain binary_ext_node.
      switch (operation)
      {
         #define case_stmt(op0,op1)                                                           \
         case op0 : return expr_gen.node_allocator_->                                         \
                       template allocate<typename details::binary_ext_node<Type,op1<Type> > > \
                          (branch[0], branch[1]);                                             \

         basic_opr_switch_statements
         extended_opr_switch_statements
         #undef case_stmt
         default : return error_node();
      }
   }
};

// Synthesise (variable op expression). A negated rhs is folded for
// * and /, and an sf3ext rhs is fused into an sf4ext node when the
// enhanced features are enabled.
struct synthesize_vob_expression
{
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      const Type& v = static_cast<details::variable_node<Type>*>(branch[0])->ref();

      #ifndef exprtk_disable_enhanced_features
      if (details::is_sf3ext_node(branch[1]))
      {
         expression_node_ptr result = error_node();

         const bool synthesis_result =
            synthesize_sf4ext_expression::template compile_right<vtype>
               (expr_gen, v, operation, branch[1], result);

         if (synthesis_result)
         {
            free_node(*expr_gen.node_allocator_,branch[1]);
            return result;
         }
      }
      #endif

      if (
           (details::e_mul == operation) ||
           (details::e_div == operation)
         )
      {
         // v * (-v1) --> -(v * v1) and v / (-v1) --> -(v / v1)
         if (details::is_uv_node(branch[1]))
         {
            typedef details::uv_base_node<Type>* uvbn_ptr_t;

            details::operator_type o = static_cast<uvbn_ptr_t>(branch[1])->operation();

            if (details::e_neg == o)
            {
               const Type& v1 = static_cast<uvbn_ptr_t>(branch[1])->v();

               free_node(*expr_gen.node_allocator_,branch[1]);

               switch (operation)
               {
                  case details::e_mul : return expr_gen(details::e_neg,
                                           expr_gen.node_allocator_->
                                              template allocate_rr<typename details::
                                                 vov_node<Type,details::mul_op<Type> > >(v,v1));

                  case details::e_div : return expr_gen(details::e_neg,
                                           expr_gen.node_allocator_->
                                              template allocate_rr<typename details::
                                                 vov_node<Type,details::div_op<Type> > >(v,v1));

                  default             : break;
               }
            }
         }
      }

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                       \
         case op0 : return expr_gen.node_allocator_->                                     \
                       template allocate_rc<typename details::vob_node<Type,op1<Type> > > \
                          (v, branch[1]);                                                 \

         basic_opr_switch_statements
         extended_opr_switch_statements
         #undef case_stmt
         default : return error_node();
      }
   }
};

// Synthesise (expression op variable) — the mirror of the vob case —
// including folding of a negated lhs for +, -, * and /.
struct synthesize_bov_expression
{
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      const Type& v = static_cast<details::variable_node<Type>*>(branch[1])->ref();

      #ifndef exprtk_disable_enhanced_features
      if (details::is_sf3ext_node(branch[0]))
      {
         expression_node_ptr result = error_node();

         const bool synthesis_result =
            synthesize_sf4ext_expression::template compile_left<vtype>
               (expr_gen, v, operation, branch[0], result);

         if (synthesis_result)
         {
            free_node(*expr_gen.node_allocator_, branch[0]);

            return result;
         }
      }
      #endif

      if (
           (details::e_add == operation) ||
           (details::e_sub == operation) ||
           (details::e_mul == operation) ||
           (details::e_div == operation)
         )
      {
         // (-v0) op v is rewritten into a vov node with the negation
         // hoisted (e.g. (-v0) + v --> v - v0).
         if (details::is_uv_node(branch[0]))
         {
            typedef details::uv_base_node<Type>* uvbn_ptr_t;

            details::operator_type o = static_cast<uvbn_ptr_t>(branch[0])->operation();

            if (details::e_neg == o)
            {
               const Type& v0 = static_cast<uvbn_ptr_t>(branch[0])->v();

               free_node(*expr_gen.node_allocator_,branch[0]);

               switch (operation)
               {
                  case details::e_add : return expr_gen.node_allocator_->
                                           template allocate_rr<typename details::
                                              vov_node<Type,details::sub_op<Type> > >(v,v0);

                  case details::e_sub : return expr_gen(details::e_neg,
                                           expr_gen.node_allocator_->
                                              template allocate_rr<typename details::
                                                 vov_node<Type,details::add_op<Type> > >(v0,v));

                  case details::e_mul : return expr_gen(details::e_neg,
                                           expr_gen.node_allocator_->
                                              template allocate_rr<typename details::
                                                 vov_node<Type,details::mul_op<Type> > >(v0,v));

                  case details::e_div : return expr_gen(details::e_neg,
                                           expr_gen.node_allocator_->
                                              template allocate_rr<typename details::
                                                 vov_node<Type,details::div_op<Type> > >(v0,v));

                  default             : break;
               }
            }
         }
      }

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                       \
         case op0 : return expr_gen.node_allocator_->                                     \
                       template allocate_cr<typename details::bov_node<Type,op1<Type> > > \
                          (branch[0], v);                                                 \

         basic_opr_switch_statements
         extended_opr_switch_statements
         #undef case_stmt
         default : return error_node();
      }
   }
};

// Synthesise (constant op expression). Trivial identities are folded
// (0*x, 0/x, 0+x, 1*x), and nested constant-op-branch (cob) or sf3ext
// structures are collapsed so constants combine at synthesis time.
struct synthesize_cob_expression
{
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      const Type c = static_cast<details::literal_node<Type>*>(branch[0])->value();

      // The literal has been read; its node is no longer needed.
      free_node(*expr_gen.node_allocator_,branch[0]);

      if (std::equal_to<T>()(T(0),c) && (details::e_mul == operation))
      {
         free_node(*expr_gen.node_allocator_,branch[1]);

         return expr_gen(T(0));
      }
      else if (std::equal_to<T>()(T(0),c) && (details::e_div == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[1]);

         return expr_gen(T(0));
      }
      else if (std::equal_to<T>()(T(0),c) && (details::e_add == operation))
         return branch[1];
      else if (std::equal_to<T>()(T(1),c) && (details::e_mul == operation))
         return branch[1];

      if (details::is_cob_node(branch[1]))
      {
         // Simplify expressions of the form:
         // 1. (1 * (2 * (3 * (4 * (5 * (6 * (7 * (8 * (9 + x))))))))) --> 40320 * (9 + x)
         // 2. (1 + (2 + (3 + (4 + (5 + (6 + (7 + (8 + (9 + x))))))))) --> 45 + x
         if (
              (operation == details::e_mul) ||
              (operation == details::e_add)
            )
         {
            details::cob_base_node<Type>* cobnode = static_cast<details::cob_base_node<Type>*>(branch[1]);

            if (operation == cobnode->operation())
            {
               switch (operation)
               {
                  case details::e_add : cobnode->set_c(c + cobnode->c()); break;
                  case details::e_mul : cobnode->set_c(c * cobnode->c()); break;
                  default             : return error_node();
               }

               return cobnode;
            }
         }

         if (operation == details::e_mul)
         {
            details::cob_base_node<Type>* cobnode = static_cast<details::cob_base_node<Type>*>(branch[1]);
            details::operator_type cob_opr = cobnode->operation();

            // NOTE(review): when operation is e_mul and the inner cob is
            // also e_mul, the equal-operation branch above has already
            // returned, so the e_mul case below looks unreachable here.
            // Left exactly as in the vendored source.
            if (
                 (details::e_div == cob_opr) ||
                 (details::e_mul == cob_opr)
               )
            {
               switch (cob_opr)
               {
                  case details::e_div : cobnode->set_c(c * cobnode->c()); break;
                  case details::e_mul : cobnode->set_c(cobnode->c() / c); break;
                  default             : return error_node();
               }

               return cobnode;
            }
         }
         else if (operation == details::e_div)
         {
            details::cob_base_node<Type>* cobnode = static_cast<details::cob_base_node<Type>*>(branch[1]);
            details::operator_type cob_opr = cobnode->operation();

            if (
                 (details::e_div == cob_opr) ||
                 (details::e_mul == cob_opr)
               )
            {
               details::expression_node<Type>* new_cobnode = error_node();

               switch (cob_opr)
               {
                  // c / (c1 / x) --> (c / c1) * x
                  case details::e_div : new_cobnode = expr_gen.node_allocator_->
                                           template allocate_tt<typename details::cob_node<Type,details::mul_op<Type> > >
                                              (c / cobnode->c(), cobnode->move_branch(0));
                                        break;

                  // c / (c1 * x) --> (c / c1) / x
                  case details::e_mul : new_cobnode = expr_gen.node_allocator_->
                                           template allocate_tt<typename details::cob_node<Type,details::div_op<Type> > >
                                              (c / cobnode->c(), cobnode->move_branch(0));
                                        break;

                  default             : return error_node();
               }

               free_node(*expr_gen.node_allocator_,branch[1]);

               return new_cobnode;
            }
         }
      }
      #ifndef exprtk_disable_enhanced_features
      else if (details::is_sf3ext_node(branch[1]))
      {
         expression_node_ptr result = error_node();

         if (synthesize_sf4ext_expression::template compile_right<ctype>(expr_gen,c,operation,branch[1],result))
         {
            free_node(*expr_gen.node_allocator_,branch[1]);

            return result;
         }
      }
      #endif

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                       \
         case op0 : return expr_gen.node_allocator_->                                     \
                       template allocate_tt<typename details::cob_node<Type,op1<Type> > > \
                          (c, branch[1]);                                                 \

         basic_opr_switch_statements
         extended_opr_switch_statements
         #undef case_stmt
         default : return error_node();
      }
   }
};

// Synthesise (expression op constant). Folds trivial identities (x*0,
// x/0 --> NaN, x+0, x*1) and collapses nested branch-op-constant (boc)
// structures, including (v ^ c0) ^ c1 --> v ^ (c0 * c1).
struct synthesize_boc_expression
{
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      const Type c = static_cast<details::literal_node<Type>*>(branch[1])->value();

      details::free_node(*(expr_gen.node_allocator_), branch[1]);

      if (std::equal_to<T>()(T(0),c) && (details::e_mul == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[0]);

         return expr_gen(T(0));
      }
      else if (std::equal_to<T>()(T(0),c) && (details::e_div == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[0]);

         // Division by a literal zero folds directly to NaN.
         return expr_gen(std::numeric_limits<T>::quiet_NaN());
      }
      else if (std::equal_to<T>()(T(0),c) && (details::e_add == operation))
         return branch[0];
      else if (std::equal_to<T>()(T(1),c) && (details::e_mul == operation))
         return branch[0];

      if (details::is_boc_node(branch[0]))
      {
         // Simplify expressions of the form:
         // 1. (((((((((x + 9) * 8) * 7) * 6) * 5) * 4) * 3) * 2) * 1) --> (x + 9) * 40320
         // 2. (((((((((x + 9) + 8) + 7) + 6) + 5) + 4) + 3) + 2) + 1) --> x + 45
         if (
              (operation == details::e_mul) ||
              (operation == details::e_add)
            )
         {
            details::boc_base_node<Type>* bocnode = static_cast<details::boc_base_node<Type>*>(branch[0]);

            if (operation == bocnode->operation())
            {
               switch (operation)
               {
                  case details::e_add : bocnode->set_c(c + bocnode->c()); break;
                  case details::e_mul : bocnode->set_c(c * bocnode->c()); break;
                  default             : return error_node();
               }

               return bocnode;
            }
         }
         else if (operation == details::e_div)
         {
            details::boc_base_node<Type>* bocnode = static_cast<details::boc_base_node<Type>*>(branch[0]);
            details::operator_type boc_opr = bocnode->operation();

            if (
                 (details::e_div == boc_opr) ||
                 (details::e_mul == boc_opr)
               )
            {
               switch (boc_opr)
               {
                  case details::e_div : bocnode->set_c(c * bocnode->c()); break;
                  case details::e_mul : bocnode->set_c(bocnode->c() / c); break;
                  default             : return error_node();
               }

               return bocnode;
            }
         }
         else if (operation == details::e_pow)
         {
            // (v ^ c0) ^ c1 --> v ^(c0 * c1)
            details::boc_base_node<Type>* bocnode = static_cast<details::boc_base_node<Type>*>(branch[0]);
            details::operator_type boc_opr = bocnode->operation();

            if (details::e_pow == boc_opr)
            {
               bocnode->set_c(bocnode->c() * c);

               return bocnode;
            }
         }
      }

      #ifndef exprtk_disable_enhanced_features
      if (details::is_sf3ext_node(branch[0]))
      {
         expression_node_ptr result = error_node();

         const bool synthesis_result =
            synthesize_sf4ext_expression::template compile_left<ctype>
               (expr_gen, c, operation, branch[0], result);

         if (synthesis_result)
         {
            free_node(*expr_gen.node_allocator_, branch[0]);

            return result;
         }
      }
      #endif

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                       \
         case op0 : return expr_gen.node_allocator_->                                     \
                       template allocate_cr<typename details::boc_node<Type,op1<Type> > > \
                          (branch[0], c);                                                 \

         basic_opr_switch_statements
         extended_opr_switch_statements
         #undef case_stmt
         default : return error_node();
      }
   }
};

// Synthesise (cob op constant) and (constant op cob): the new literal
// is merged into the existing constant-op-branch node where possible,
// avoiding an extra node in the tree.
struct synthesize_cocob_expression
{
   static inline expression_node_ptr
process(expression_generator<Type>& expr_gen,
        const details::operator_type& operation,
        expression_node_ptr (&branch)[2])
{
   expression_node_ptr result = error_node();

   // (cob) o c --> cob
   if (details::is_cob_node(branch[0]))
   {
      details::cob_base_node<Type>* cobnode = static_cast<details::cob_base_node<Type>*>(branch[0]);

      const Type c = static_cast<details::literal_node<Type>*>(branch[1])->value();

      // Trivial identities decided by the new constant alone.
      if (std::equal_to<T>()(T(0),c) && (details::e_mul == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[0]);
         free_node(*expr_gen.node_allocator_, branch[1]);

         return expr_gen(T(0));
      }
      else if (std::equal_to<T>()(T(0),c) && (details::e_div == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[0]);
         free_node(*expr_gen.node_allocator_, branch[1]);

         return expr_gen(T(std::numeric_limits<T>::quiet_NaN()));
      }
      else if (std::equal_to<T>()(T(0),c) && (details::e_add == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[1]);

         return branch[0];
      }
      else if (std::equal_to<T>()(T(1),c) && (details::e_mul == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[1]);

         return branch[0];
      }
      else if (std::equal_to<T>()(T(1),c) && (details::e_div == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[1]);

         return branch[0];
      }

      const bool op_addsub = (details::e_add == cobnode->operation()) ||
                             (details::e_sub == cobnode->operation()) ;

      if (op_addsub)
      {
         switch (operation)
         {
            case details::e_add : cobnode->set_c(cobnode->c() + c); break;
            case details::e_sub : cobnode->set_c(cobnode->c() - c); break;
            default             : return error_node();
         }

         result = cobnode;
      }
      else if (details::e_mul == cobnode->operation())
      {
         switch (operation)
         {
            case details::e_mul : cobnode->set_c(cobnode->c() * c); break;
            case details::e_div : cobnode->set_c(cobnode->c() / c); break;
            default             : return error_node();
         }

         result = cobnode;
      }
      else if (details::e_div == cobnode->operation())
      {
         if (details::e_mul == operation)
         {
            cobnode->set_c(cobnode->c() * c);
            result = cobnode;
         }
         else if (details::e_div == operation)
         {
            // (c1 / x) / c --> (c1 / c) / x
            result = expr_gen.node_allocator_->
               template allocate_tt<typename details::cob_node<Type,details::div_op<Type> > >
                  (cobnode->c() / c, cobnode->move_branch(0));

            free_node(*expr_gen.node_allocator_, branch[0]);
         }
      }

      if (result)
      {
         free_node(*expr_gen.node_allocator_,branch[1]);
      }
   }

   // c o (cob) --> cob
   else if (details::is_cob_node(branch[1]))
   {
      details::cob_base_node<Type>* cobnode = static_cast<details::cob_base_node<Type>*>(branch[1]);

      const Type c = static_cast<details::literal_node<Type>*>(branch[0])->value();

      if (std::equal_to<T>()(T(0),c) && (details::e_mul == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[0]);
         free_node(*expr_gen.node_allocator_, branch[1]);

         return expr_gen(T(0));
      }
      else if (std::equal_to<T>()(T(0),c) && (details::e_div == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[0]);
         free_node(*expr_gen.node_allocator_, branch[1]);

         return expr_gen(T(0));
      }
      else if (std::equal_to<T>()(T(0),c) && (details::e_add == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[0]);

         return branch[1];
      }
      else if (std::equal_to<T>()(T(1),c) && (details::e_mul == operation))
      {
         free_node(*expr_gen.node_allocator_, branch[0]);

         return branch[1];
      }

      if (details::e_add == cobnode->operation())
      {
         if (details::e_add == operation)
         {
            cobnode->set_c(c + cobnode->c());
            result = cobnode;
         }
         else if (details::e_sub == operation)
         {
            // c - (c1 + x) --> (c - c1) - x
            result = expr_gen.node_allocator_->
               template allocate_tt<typename details::cob_node<Type,details::sub_op<Type> > >
                  (c - cobnode->c(), cobnode->move_branch(0));

            free_node(*expr_gen.node_allocator_,branch[1]);
         }
      }
      else if (details::e_sub == cobnode->operation())
      {
         if (details::e_add == operation)
         {
            cobnode->set_c(c + cobnode->c());
            result = cobnode;
         }
         else if (details::e_sub == operation)
         {
            // c - (c1 - x) --> (c - c1) + x
            result = expr_gen.node_allocator_->
               template allocate_tt<typename details::cob_node<Type,details::add_op<Type> > >
                  (c - cobnode->c(), cobnode->move_branch(0));

            free_node(*expr_gen.node_allocator_,branch[1]);
         }
      }
      else if (details::e_mul == cobnode->operation())
      {
         if (details::e_mul == operation)
         {
            cobnode->set_c(c * cobnode->c());
            result = cobnode;
         }
         else if (details::e_div == operation)
         {
            // c / (c1 * x) --> (c / c1) / x
            result = expr_gen.node_allocator_->
               template allocate_tt<typename details::cob_node<Type,details::div_op<Type> > >
                  (c / cobnode->c(), cobnode->move_branch(0));

            free_node(*expr_gen.node_allocator_,branch[1]);
         }
      }
      else if (details::e_div == cobnode->operation())
      {
         if (details::e_mul == operation)
         {
            cobnode->set_c(c * cobnode->c());
            result = cobnode;
         }
         else if (details::e_div == operation)
         {
            // c / (c1 / x) --> (c / c1) * x
            result = expr_gen.node_allocator_->
               template allocate_tt<typename details::cob_node<Type,details::mul_op<Type> > >
                  (c / cobnode->c(), cobnode->move_branch(0));

            free_node(*expr_gen.node_allocator_,branch[1]);
         }
      }

      if (result)
      {
         free_node(*expr_gen.node_allocator_,branch[0]);
      }
   }

   return result;
}
};

// Synthesise (boc op constant) and (constant op boc): the new literal
// is merged into the existing branch-op-constant node where possible.
struct synthesize_coboc_expression
{
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      expression_node_ptr result = error_node();

      // (boc) o c --> boc
      if (details::is_boc_node(branch[0]))
      {
         details::boc_base_node<Type>* bocnode = static_cast<details::boc_base_node<Type>*>(branch[0]);

         const Type c = static_cast<details::literal_node<Type>*>(branch[1])->value();

         if (details::e_add == bocnode->operation())
         {
            switch (operation)
            {
               case details::e_add : bocnode->set_c(bocnode->c() + c); break;
               case details::e_sub : bocnode->set_c(bocnode->c() - c); break;
               default             : return error_node();
            }

            result = bocnode;
         }
         else if (details::e_mul == bocnode->operation())
         {
            switch (operation)
            {
               case details::e_mul : bocnode->set_c(bocnode->c() * c); break;
               case details::e_div : bocnode->set_c(bocnode->c() / c); break;
               default             : return error_node();
            }

            result = bocnode;
         }
         else if (details::e_sub == bocnode->operation())
         {
            if (details::e_add == operation)
            {
               // (x - c1) + c --> x + (c - c1)
               result = expr_gen.node_allocator_->
                  template allocate_tt<typename details::boc_node<Type,details::add_op<Type> > >
                     (bocnode->move_branch(0), c - bocnode->c());

               free_node(*expr_gen.node_allocator_,branch[0]);
            }
            else if (details::e_sub == operation)
            {
               bocnode->set_c(bocnode->c() + c);
               result = bocnode;
            }
         }
         else if (details::e_div == bocnode->operation())
         {
            switch (operation)
            {
               case details::e_div : bocnode->set_c(bocnode->c() * c); break;
               case details::e_mul : bocnode->set_c(bocnode->c() / c); break;
               default             : return error_node();
            }

            result = bocnode;
         }

         if (result)
         {
            free_node(*expr_gen.node_allocator_, branch[1]);
         }
      }

      // c o (boc) --> boc
      else if (details::is_boc_node(branch[1]))
      {
         details::boc_base_node<Type>* bocnode = static_cast<details::boc_base_node<Type>*>(branch[1]);

         const Type c = static_cast<details::literal_node<Type>*>(branch[0])->value();

         if (details::e_add == bocnode->operation())
         {
            if (details::e_add == operation)
            {
               bocnode->set_c(c + bocnode->c());
               result = bocnode;
            }
            else if (details::e_sub == operation)
            {
               // c - (x + c1) --> (c - c1) - x
               result = expr_gen.node_allocator_->
                  template allocate_tt<typename details::cob_node<Type,details::sub_op<Type> > >
                     (c - bocnode->c(), bocnode->move_branch(0));

               free_node(*expr_gen.node_allocator_,branch[1]);
            }
         }
         else if (details::e_sub == bocnode->operation())
         {
            if (details::e_add == operation)
            {
               // c + (x - c1) --> x + (c - c1)
               result = expr_gen.node_allocator_->
                  template allocate_tt<typename details::boc_node<Type,details::add_op<Type> > >
                     (bocnode->move_branch(0), c - bocnode->c());

               free_node(*expr_gen.node_allocator_,branch[1]);
            }
            else if (details::e_sub == operation)
            {
               // c - (x - c1) --> (c + c1) - x
               result = expr_gen.node_allocator_->
                  template allocate_tt<typename details::cob_node<Type,details::sub_op<Type> > >
                     (c + bocnode->c(), bocnode->move_branch(0));

               free_node(*expr_gen.node_allocator_,branch[1]);
            }
         }
         else if (details::e_mul == bocnode->operation())
         {
            if (details::e_mul == operation)
            {
               bocnode->set_c(c * bocnode->c());
               result = bocnode;
            }
            else if (details::e_div == operation)
            {
               // c / (x * c1) --> (c / c1) / x
               result = expr_gen.node_allocator_->
                  template allocate_tt<typename details::cob_node<Type,details::div_op<Type> > >
                     (c / bocnode->c(), bocnode->move_branch(0));

               free_node(*expr_gen.node_allocator_,branch[1]);
            }
         }
         else if (details::e_div == bocnode->operation())
         {
            if (details::e_mul == operation)
            {
               // c * (x / c1) --> x / (c1 / c)
               bocnode->set_c(bocnode->c() / c);
               result = bocnode;
            }
            else if (details::e_div == operation)
            {
               // c / (x / c1) --> (c * c1) / x
               result = expr_gen.node_allocator_->
                  template allocate_tt<typename details::cob_node<Type,details::div_op<Type> > >
                     (c * bocnode->c(), bocnode->move_branch(0));

               free_node(*expr_gen.node_allocator_,branch[1]);
            }
         }

         if (result)
         {
            free_node(*expr_gen.node_allocator_,branch[0]);
         }
      }

      return result;
   }
};

#ifndef exprtk_disable_enhanced_features
// Looks up a specialised synthesis routine for this operand-type
// combination (derived via branch_to_id) in synthesize_map_ and invokes
// it. Returns false when the operation is not optimisable or the
// combination has no registered specialisation.
inline bool synthesize_expression(const details::operator_type& operation,
                                  expression_node_ptr (&branch)[2],
                                  expression_node_ptr& result)
{
   result = error_node();

   if (!operation_optimisable(operation))
      return false;

   const std::string node_id = branch_to_id(branch);

   const typename synthesize_map_t::iterator itr = synthesize_map_.find(node_id);

   if (synthesize_map_.end() != itr)
   {
      result = itr->second((*this), operation, branch);

      return true;
   }
   else
      return false;
}

// Synthesise (variable op variable) as a single vov node holding
// references to both variables.
struct synthesize_vov_expression
{
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      const Type& v1 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type& v2 = static_cast<details::variable_node<Type>*>(branch[1])->ref();

      switch (operation)
      {
         #define case_stmt(op0,op1)                                                       \
         case op0 : return expr_gen.node_allocator_->                                     \
                       template allocate_rr<typename details::vov_node<Type,op1<Type> > > \
                          (v1, v2);                                                       \

         basic_opr_switch_statements
         extended_opr_switch_statements
         #undef case_stmt
         default : return error_node();
      }
   }
};

// Synthesise (constant op variable). (Continues beyond this chunk.)
struct synthesize_cov_expression
{
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      const Type c =
static_cast<details::literal_node<Type>*> (branch[0])->value(); const Type& v = static_cast<details::variable_node<Type>*>(branch[1])->ref (); details::free_node(*(expr_gen.node_allocator_),branch[0]); if (std::equal_to<T>()(T(0),c) && (details::e_mul == operation)) return expr_gen(T(0)); else if (std::equal_to<T>()(T(0),c) && (details::e_div == operation)) return expr_gen(T(0)); else if (std::equal_to<T>()(T(0),c) && (details::e_add == operation)) return static_cast<details::variable_node<Type>*>(branch[1]); else if (std::equal_to<T>()(T(1),c) && (details::e_mul == operation)) return static_cast<details::variable_node<Type>*>(branch[1]); switch (operation) { #define case_stmt(op0,op1) \ case op0 : return expr_gen.node_allocator_-> \ template allocate_cr<typename details::cov_node<Type,op1<Type> > > \ (c, v); \ basic_opr_switch_statements extended_opr_switch_statements #undef case_stmt default : return error_node(); } } }; struct synthesize_voc_expression { static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { const Type& v = static_cast<details::variable_node<Type>*>(branch[0])->ref (); const Type c = static_cast<details::literal_node<Type>*> (branch[1])->value(); details::free_node(*(expr_gen.node_allocator_), branch[1]); if (expr_gen.cardinal_pow_optimisable(operation,c)) { if (std::equal_to<T>()(T(1),c)) return branch[0]; else return expr_gen.cardinal_pow_optimisation(v,c); } else if (std::equal_to<T>()(T(0),c) && (details::e_mul == operation)) return expr_gen(T(0)); else if (std::equal_to<T>()(T(0),c) && (details::e_div == operation)) return expr_gen(std::numeric_limits<T>::quiet_NaN()); else if (std::equal_to<T>()(T(0),c) && (details::e_add == operation)) return static_cast<details::variable_node<Type>*>(branch[0]); else if (std::equal_to<T>()(T(1),c) && (details::e_mul == operation)) return static_cast<details::variable_node<Type>*>(branch[0]); else if 
(std::equal_to<T>()(T(1),c) && (details::e_div == operation)) return static_cast<details::variable_node<Type>*>(branch[0]); switch (operation) { #define case_stmt(op0,op1) \ case op0 : return expr_gen.node_allocator_-> \ template allocate_rc<typename details::voc_node<Type,op1<Type> > > \ (v, c); \ basic_opr_switch_statements extended_opr_switch_statements #undef case_stmt default : return error_node(); } } }; struct synthesize_sf3ext_expression { template <typename T0, typename T1, typename T2> static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& sf3opr, T0 t0, T1 t1, T2 t2) { switch (sf3opr) { #define case_stmt(op) \ case details::e_sf##op : return details::T0oT1oT2_sf3ext<T,T0,T1,T2,details::sf##op##_op<Type> >:: \ allocate(*(expr_gen.node_allocator_), t0, t1, t2); \ case_stmt(00) case_stmt(01) case_stmt(02) case_stmt(03) case_stmt(04) case_stmt(05) case_stmt(06) case_stmt(07) case_stmt(08) case_stmt(09) case_stmt(10) case_stmt(11) case_stmt(12) case_stmt(13) case_stmt(14) case_stmt(15) case_stmt(16) case_stmt(17) case_stmt(18) case_stmt(19) case_stmt(20) case_stmt(21) case_stmt(22) case_stmt(23) case_stmt(24) case_stmt(25) case_stmt(26) case_stmt(27) case_stmt(28) case_stmt(29) case_stmt(30) #undef case_stmt default : return error_node(); } } template <typename T0, typename T1, typename T2> static inline bool compile(expression_generator<Type>& expr_gen, const std::string& id, T0 t0, T1 t1, T2 t2, expression_node_ptr& result) { details::operator_type sf3opr; if (!expr_gen.sf3_optimisable(id,sf3opr)) return false; else result = synthesize_sf3ext_expression::template process<T0,T1,T2>(expr_gen,sf3opr,t0,t1,t2); return true; } }; struct synthesize_sf4ext_expression { template <typename T0, typename T1, typename T2, typename T3> static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& sf4opr, T0 t0, T1 t1, T2 t2, T3 t3) { switch (sf4opr) { #define 
case_stmt0(op) \ case details::e_sf##op : return details::T0oT1oT2oT3_sf4ext<Type,T0,T1,T2,T3,details::sf##op##_op<Type> >:: \ allocate(*(expr_gen.node_allocator_), t0, t1, t2, t3); \ #define case_stmt1(op) \ case details::e_sf4ext##op : return details::T0oT1oT2oT3_sf4ext<Type,T0,T1,T2,T3,details::sfext##op##_op<Type> >:: \ allocate(*(expr_gen.node_allocator_), t0, t1, t2, t3); \ case_stmt0(48) case_stmt0(49) case_stmt0(50) case_stmt0(51) case_stmt0(52) case_stmt0(53) case_stmt0(54) case_stmt0(55) case_stmt0(56) case_stmt0(57) case_stmt0(58) case_stmt0(59) case_stmt0(60) case_stmt0(61) case_stmt0(62) case_stmt0(63) case_stmt0(64) case_stmt0(65) case_stmt0(66) case_stmt0(67) case_stmt0(68) case_stmt0(69) case_stmt0(70) case_stmt0(71) case_stmt0(72) case_stmt0(73) case_stmt0(74) case_stmt0(75) case_stmt0(76) case_stmt0(77) case_stmt0(78) case_stmt0(79) case_stmt0(80) case_stmt0(81) case_stmt0(82) case_stmt0(83) case_stmt1(00) case_stmt1(01) case_stmt1(02) case_stmt1(03) case_stmt1(04) case_stmt1(05) case_stmt1(06) case_stmt1(07) case_stmt1(08) case_stmt1(09) case_stmt1(10) case_stmt1(11) case_stmt1(12) case_stmt1(13) case_stmt1(14) case_stmt1(15) case_stmt1(16) case_stmt1(17) case_stmt1(18) case_stmt1(19) case_stmt1(20) case_stmt1(21) case_stmt1(22) case_stmt1(23) case_stmt1(24) case_stmt1(25) case_stmt1(26) case_stmt1(27) case_stmt1(28) case_stmt1(29) case_stmt1(30) case_stmt1(31) case_stmt1(32) case_stmt1(33) case_stmt1(34) case_stmt1(35) case_stmt1(36) case_stmt1(37) case_stmt1(38) case_stmt1(39) case_stmt1(40) case_stmt1(41) case_stmt1(42) case_stmt1(43) case_stmt1(44) case_stmt1(45) case_stmt1(46) case_stmt1(47) case_stmt1(48) case_stmt1(49) case_stmt1(50) case_stmt1(51) case_stmt1(52) case_stmt1(53) case_stmt1(54) case_stmt1(55) case_stmt1(56) case_stmt1(57) case_stmt1(58) case_stmt1(59) case_stmt1(60) case_stmt1(61) #undef case_stmt0 #undef case_stmt1 default : return error_node(); } } template <typename T0, typename T1, typename T2, typename T3> static inline 
bool compile(expression_generator<Type>& expr_gen, const std::string& id, T0 t0, T1 t1, T2 t2, T3 t3, expression_node_ptr& result) { details::operator_type sf4opr; if (!expr_gen.sf4_optimisable(id,sf4opr)) return false; else result = synthesize_sf4ext_expression::template process<T0,T1,T2,T3> (expr_gen, sf4opr, t0, t1, t2, t3); return true; } // T o (sf3ext) template <typename ExternalType> static inline bool compile_right(expression_generator<Type>& expr_gen, ExternalType t, const details::operator_type& operation, expression_node_ptr& sf3node, expression_node_ptr& result) { if (!details::is_sf3ext_node(sf3node)) return false; typedef details::T0oT1oT2_base_node<Type>* sf3ext_base_ptr; sf3ext_base_ptr n = static_cast<sf3ext_base_ptr>(sf3node); std::string id = "t" + expr_gen.to_str(operation) + "(" + n->type_id() + ")"; switch (n->type()) { case details::expression_node<Type>::e_covoc : return compile_right_impl <typename covoc_t::sf3_type_node,ExternalType,ctype,vtype,ctype> (expr_gen, id, t, sf3node, result); case details::expression_node<Type>::e_covov : return compile_right_impl <typename covov_t::sf3_type_node,ExternalType,ctype,vtype,vtype> (expr_gen, id, t, sf3node, result); case details::expression_node<Type>::e_vocov : return compile_right_impl <typename vocov_t::sf3_type_node,ExternalType,vtype,ctype,vtype> (expr_gen, id, t, sf3node, result); case details::expression_node<Type>::e_vovoc : return compile_right_impl <typename vovoc_t::sf3_type_node,ExternalType,vtype,vtype,ctype> (expr_gen, id, t, sf3node, result); case details::expression_node<Type>::e_vovov : return compile_right_impl <typename vovov_t::sf3_type_node,ExternalType,vtype,vtype,vtype> (expr_gen, id, t, sf3node, result); default : return false; } } // (sf3ext) o T template <typename ExternalType> static inline bool compile_left(expression_generator<Type>& expr_gen, ExternalType t, const details::operator_type& operation, expression_node_ptr& sf3node, expression_node_ptr& result) { if 
(!details::is_sf3ext_node(sf3node)) return false; typedef details::T0oT1oT2_base_node<Type>* sf3ext_base_ptr; sf3ext_base_ptr n = static_cast<sf3ext_base_ptr>(sf3node); std::string id = "(" + n->type_id() + ")" + expr_gen.to_str(operation) + "t"; switch (n->type()) { case details::expression_node<Type>::e_covoc : return compile_left_impl <typename covoc_t::sf3_type_node,ExternalType,ctype,vtype,ctype> (expr_gen, id, t, sf3node, result); case details::expression_node<Type>::e_covov : return compile_left_impl <typename covov_t::sf3_type_node,ExternalType,ctype,vtype,vtype> (expr_gen, id, t, sf3node, result); case details::expression_node<Type>::e_vocov : return compile_left_impl <typename vocov_t::sf3_type_node,ExternalType,vtype,ctype,vtype> (expr_gen, id, t, sf3node, result); case details::expression_node<Type>::e_vovoc : return compile_left_impl <typename vovoc_t::sf3_type_node,ExternalType,vtype,vtype,ctype> (expr_gen, id, t, sf3node, result); case details::expression_node<Type>::e_vovov : return compile_left_impl <typename vovov_t::sf3_type_node,ExternalType,vtype,vtype,vtype> (expr_gen, id, t, sf3node, result); default : return false; } } template <typename SF3TypeNode, typename ExternalType, typename T0, typename T1, typename T2> static inline bool compile_right_impl(expression_generator<Type>& expr_gen, const std::string& id, ExternalType t, expression_node_ptr& node, expression_node_ptr& result) { SF3TypeNode* n = dynamic_cast<SF3TypeNode*>(node); if (n) { T0 t0 = n->t0(); T1 t1 = n->t1(); T2 t2 = n->t2(); return synthesize_sf4ext_expression::template compile<ExternalType,T0,T1,T2> (expr_gen, id, t, t0, t1, t2, result); } else return false; } template <typename SF3TypeNode, typename ExternalType, typename T0, typename T1, typename T2> static inline bool compile_left_impl(expression_generator<Type>& expr_gen, const std::string& id, ExternalType t, expression_node_ptr& node, expression_node_ptr& result) { SF3TypeNode* n = dynamic_cast<SF3TypeNode*>(node); if 
(n) { T0 t0 = n->t0(); T1 t1 = n->t1(); T2 t2 = n->t2(); return synthesize_sf4ext_expression::template compile<T0,T1,T2,ExternalType> (expr_gen, id, t0, t1, t2, t, result); } else return false; } }; struct synthesize_vovov_expression0 { typedef typename vovov_t::type0 node_type; typedef typename vovov_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0 o0 v1) o1 (v2) const details::vov_base_node<Type>* vov = static_cast<details::vov_base_node<Type>*>(branch[0]); const Type& v0 = vov->v0(); const Type& v1 = vov->v1(); const Type& v2 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = vov->operation(); const details::operator_type o1 = operation; binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (v0 / v1) / v2 --> (vovov) v0 / (v1 * v2) if ((details::e_div == o0) && (details::e_div == o1)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<vtype,vtype,vtype>(expr_gen, "t/(t*t)", v0, v1, v2, result); exprtk_debug(("(v0 / v1) / v2 --> (vovov) v0 / (v1 * v2)\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<vtype, vtype, vtype> (expr_gen, id(expr_gen, o0, o1), v0, v1, v2, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t"); } }; struct synthesize_vovov_expression1 { typedef typename vovov_t::type1 node_type; typedef typename vovov_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0) o0 (v1 o1 v2) const details::vov_base_node<Type>* vov = static_cast<details::vov_base_node<Type>*>(branch[1]); const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref(); const Type& v1 = vov->v0(); const Type& v2 = vov->v1(); const details::operator_type o0 = operation; const details::operator_type o1 = vov->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // v0 / (v1 / v2) --> (vovov) (v0 * v2) / v1 if ((details::e_div == o0) && (details::e_div == o1)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<vtype,vtype,vtype>(expr_gen, "(t*t)/t", v0, v2, v1, result); exprtk_debug(("v0 / (v1 / v2) --> (vovov) (v0 * v2) / v1\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<vtype, vtype, vtype> (expr_gen, id(expr_gen, o0, o1), v0, v1, v2, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)"); } }; struct synthesize_vovoc_expression0 { typedef typename vovoc_t::type0 node_type; typedef typename vovoc_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0 o0 v1) o1 (c) const details::vov_base_node<Type>* vov = static_cast<details::vov_base_node<Type>*>(branch[0]); const Type& v0 = vov->v0(); const Type& v1 = vov->v1(); const Type c = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = vov->operation(); const details::operator_type o1 = operation; binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (v0 / v1) / c --> (vovoc) v0 / (v1 * c) if ((details::e_div == o0) && (details::e_div == o1)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<vtype,vtype,ctype>(expr_gen, "t/(t*t)", v0, v1, c, result); exprtk_debug(("(v0 / v1) / c --> (vovoc) v0 / (v1 * c)\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<vtype, vtype, ctype> (expr_gen, id(expr_gen, o0, o1), v0, v1, c, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, c, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t"); } }; struct synthesize_vovoc_expression1 { typedef typename vovoc_t::type1 node_type; typedef typename vovoc_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0) o0 (v1 o1 c) const details::voc_base_node<Type>* voc = static_cast<const details::voc_base_node<Type>*>(branch[1]); const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref(); const Type& v1 = voc->v(); const Type c = voc->c(); const details::operator_type o0 = operation; const details::operator_type o1 = voc->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // v0 / (v1 / c) --> (vocov) (v0 * c) / v1 if ((details::e_div == o0) && (details::e_div == o1)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<vtype,ctype,vtype>(expr_gen, "(t*t)/t", v0, c, v1, result); exprtk_debug(("v0 / (v1 / c) --> (vocov) (v0 * c) / v1\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<vtype, vtype, ctype> (expr_gen, id(expr_gen, o0, o1), v0, v1, c, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, c, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)"); } }; struct synthesize_vocov_expression0 { typedef typename vocov_t::type0 node_type; typedef typename vocov_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0 o0 c) o1 (v1) const details::voc_base_node<Type>* voc = static_cast<details::voc_base_node<Type>*>(branch[0]); const Type& v0 = voc->v(); const Type c = voc->c(); const Type& v1 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = voc->operation(); const details::operator_type o1 = operation; binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (v0 / c) / v1 --> (vovoc) v0 / (v1 * c) if ((details::e_div == o0) && (details::e_div == o1)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<vtype,vtype,ctype>(expr_gen, "t/(t*t)", v0, v1, c, result); exprtk_debug(("(v0 / c) / v1 --> (vovoc) v0 / (v1 * c)\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<vtype, ctype, vtype> (expr_gen, id(expr_gen, o0, o1), v0, c, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, c, v1, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t"); } }; struct synthesize_vocov_expression1 { typedef typename vocov_t::type1 node_type; typedef typename vocov_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0) o0 (c o1 v1) const details::cov_base_node<Type>* cov = static_cast<details::cov_base_node<Type>*>(branch[1]); const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref(); const Type c = cov->c(); const Type& v1 = cov->v(); const details::operator_type o0 = operation; const details::operator_type o1 = cov->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // v0 / (c / v1) --> (vovoc) (v0 * v1) / c if ((details::e_div == o0) && (details::e_div == o1)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<vtype, vtype, ctype>(expr_gen, "(t*t)/t", v0, v1, c, result); exprtk_debug(("v0 / (c / v1) --> (vovoc) (v0 * v1) / c\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<vtype, ctype, vtype> (expr_gen, id(expr_gen, o0, o1), v0, c, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, c, v1, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)"); } }; struct synthesize_covov_expression0 { typedef typename covov_t::type0 node_type; typedef typename covov_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (c o0 v0) o1 (v1) const details::cov_base_node<Type>* cov = static_cast<details::cov_base_node<Type>*>(branch[0]); const Type c = cov->c(); const Type& v0 = cov->v(); const Type& v1 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = cov->operation(); const details::operator_type o1 = operation; binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (c / v0) / v1 --> (covov) c / (v0 * v1) if ((details::e_div == o0) && (details::e_div == o1)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype, vtype, vtype>(expr_gen, "t/(t*t)", c, v0, v1, result); exprtk_debug(("(c / v0) / v1 --> (covov) c / (v0 * v1)\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<ctype, vtype, vtype> (expr_gen, id(expr_gen, o0, o1), c, v0, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), c, v0, v1, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t"); } }; struct synthesize_covov_expression1 { typedef typename covov_t::type1 node_type; typedef typename covov_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (c) o0 (v0 o1 v1) const details::vov_base_node<Type>* vov = static_cast<details::vov_base_node<Type>*>(branch[1]); const Type c = static_cast<details::literal_node<Type>*>(branch[0])->value(); const Type& v0 = vov->v0(); const Type& v1 = vov->v1(); const details::operator_type o0 = operation; const details::operator_type o1 = vov->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // c / (v0 / v1) --> (covov) (c * v1) / v0 if ((details::e_div == o0) && (details::e_div == o1)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype, vtype, vtype>(expr_gen, "(t*t)/t", c, v1, v0, result); exprtk_debug(("c / (v0 / v1) --> (covov) (c * v1) / v0\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<ctype, vtype, vtype> (expr_gen, id(expr_gen, o0, o1), c, v0, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), c, v0, v1, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)"); } }; struct synthesize_covoc_expression0 { typedef typename covoc_t::type0 node_type; typedef typename covoc_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (c0 o0 v) o1 (c1) const details::cov_base_node<Type>* cov = static_cast<details::cov_base_node<Type>*>(branch[0]); const Type c0 = cov->c(); const Type& v = cov->v(); const Type c1 = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = cov->operation(); const details::operator_type o1 = operation; binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (c0 + v) + c1 --> (cov) (c0 + c1) + v if ((details::e_add == o0) && (details::e_add == o1)) { exprtk_debug(("(c0 + v) + c1 --> (cov) (c0 + c1) + v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::add_op<Type> > >(c0 + c1, v); } // (c0 + v) - c1 --> (cov) (c0 - c1) + v else if ((details::e_add == o0) 
&& (details::e_sub == o1)) { exprtk_debug(("(c0 + v) - c1 --> (cov) (c0 - c1) + v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::add_op<Type> > >(c0 - c1, v); } // (c0 - v) + c1 --> (cov) (c0 + c1) - v else if ((details::e_sub == o0) && (details::e_add == o1)) { exprtk_debug(("(c0 - v) + c1 --> (cov) (c0 + c1) - v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::sub_op<Type> > >(c0 + c1, v); } // (c0 - v) - c1 --> (cov) (c0 - c1) - v else if ((details::e_sub == o0) && (details::e_sub == o1)) { exprtk_debug(("(c0 - v) - c1 --> (cov) (c0 - c1) - v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::sub_op<Type> > >(c0 - c1, v); } // (c0 * v) * c1 --> (cov) (c0 * c1) * v else if ((details::e_mul == o0) && (details::e_mul == o1)) { exprtk_debug(("(c0 * v) * c1 --> (cov) (c0 * c1) * v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::mul_op<Type> > >(c0 * c1, v); } // (c0 * v) / c1 --> (cov) (c0 / c1) * v else if ((details::e_mul == o0) && (details::e_div == o1)) { exprtk_debug(("(c0 * v) / c1 --> (cov) (c0 / c1) * v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::mul_op<Type> > >(c0 / c1, v); } // (c0 / v) * c1 --> (cov) (c0 * c1) / v else if ((details::e_div == o0) && (details::e_mul == o1)) { exprtk_debug(("(c0 / v) * c1 --> (cov) (c0 * c1) / v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::div_op<Type> > >(c0 * c1, v); } // (c0 / v) / c1 --> (cov) (c0 / c1) / v else if ((details::e_div == o0) && (details::e_div == o1)) { exprtk_debug(("(c0 / v) / c1 --> (cov) (c0 / c1) / v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::div_op<Type> > >(c0 / c1, v); } } const bool synthesis_result = 
synthesize_sf3ext_expression::template compile<ctype, vtype, ctype> (expr_gen, id(expr_gen, o0, o1), c0, v, c1,result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), c0, v, c1, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t"); } }; struct synthesize_covoc_expression1 { typedef typename covoc_t::type1 node_type; typedef typename covoc_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (c0) o0 (v o1 c1) const details::voc_base_node<Type>* voc = static_cast<details::voc_base_node<Type>*>(branch[1]); const Type c0 = static_cast<details::literal_node<Type>*>(branch[0])->value(); const Type& v = voc->v(); const Type c1 = voc->c(); const details::operator_type o0 = operation; const details::operator_type o1 = voc->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (c0) + (v + c1) --> (cov) (c0 + c1) + v if ((details::e_add == o0) && (details::e_add == o1)) { exprtk_debug(("(c0) + (v + c1) --> (cov) (c0 + c1) + v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::add_op<Type> > >(c0 + c1, v); } // (c0) + (v - c1) --> (cov) (c0 - c1) + v else if ((details::e_add == o0) && (details::e_sub == o1)) { exprtk_debug(("(c0) + 
(v - c1) --> (cov) (c0 - c1) + v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::add_op<Type> > >(c0 - c1, v); } // (c0) - (v + c1) --> (cov) (c0 - c1) - v else if ((details::e_sub == o0) && (details::e_add == o1)) { exprtk_debug(("(c0) - (v + c1) --> (cov) (c0 - c1) - v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::sub_op<Type> > >(c0 - c1, v); } // (c0) - (v - c1) --> (cov) (c0 + c1) - v else if ((details::e_sub == o0) && (details::e_sub == o1)) { exprtk_debug(("(c0) - (v - c1) --> (cov) (c0 + c1) - v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::sub_op<Type> > >(c0 + c1, v); } // (c0) * (v * c1) --> (voc) v * (c0 * c1) else if ((details::e_mul == o0) && (details::e_mul == o1)) { exprtk_debug(("(c0) * (v * c1) --> (voc) v * (c0 * c1)\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::mul_op<Type> > >(c0 * c1, v); } // (c0) * (v / c1) --> (cov) (c0 / c1) * v else if ((details::e_mul == o0) && (details::e_div == o1)) { exprtk_debug(("(c0) * (v / c1) --> (cov) (c0 / c1) * v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::mul_op<Type> > >(c0 / c1, v); } // (c0) / (v * c1) --> (cov) (c0 / c1) / v else if ((details::e_div == o0) && (details::e_mul == o1)) { exprtk_debug(("(c0) / (v * c1) --> (cov) (c0 / c1) / v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::div_op<Type> > >(c0 / c1, v); } // (c0) / (v / c1) --> (cov) (c0 * c1) / v else if ((details::e_div == o0) && (details::e_div == o1)) { exprtk_debug(("(c0) / (v / c1) --> (cov) (c0 * c1) / v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::div_op<Type> > >(c0 * c1, v); } } const bool synthesis_result = synthesize_sf3ext_expression::template 
compile<ctype, vtype, ctype> (expr_gen, id(expr_gen, o0, o1), c0, v, c1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), c0, v, c1, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)"); } }; struct synthesize_cocov_expression0 { typedef typename cocov_t::type0 node_type; static inline expression_node_ptr process(expression_generator<Type>&, const details::operator_type&, expression_node_ptr (&)[2]) { // (c0 o0 c1) o1 (v) - Not possible. return error_node(); } }; struct synthesize_cocov_expression1 { typedef typename cocov_t::type1 node_type; typedef typename cocov_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (c0) o0 (c1 o1 v) const details::cov_base_node<Type>* cov = static_cast<details::cov_base_node<Type>*>(branch[1]); const Type c0 = static_cast<details::literal_node<Type>*>(branch[0])->value(); const Type c1 = cov->c(); const Type& v = cov->v(); const details::operator_type o0 = operation; const details::operator_type o1 = cov->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (c0) + (c1 + v) --> (cov) (c0 + c1) + v if ((details::e_add == o0) && (details::e_add == o1)) { exprtk_debug(("(c0) + (c1 + v) --> (cov) (c0 + c1) + v\n")); return 
expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::add_op<Type> > >(c0 + c1, v); } // (c0) + (c1 - v) --> (cov) (c0 + c1) - v else if ((details::e_add == o0) && (details::e_sub == o1)) { exprtk_debug(("(c0) + (c1 - v) --> (cov) (c0 + c1) - v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::sub_op<Type> > >(c0 + c1, v); } // (c0) - (c1 + v) --> (cov) (c0 - c1) - v else if ((details::e_sub == o0) && (details::e_add == o1)) { exprtk_debug(("(c0) - (c1 + v) --> (cov) (c0 - c1) - v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::sub_op<Type> > >(c0 - c1, v); } // (c0) - (c1 - v) --> (cov) (c0 - c1) + v else if ((details::e_sub == o0) && (details::e_sub == o1)) { exprtk_debug(("(c0) - (c1 - v) --> (cov) (c0 - c1) + v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::add_op<Type> > >(c0 - c1, v); } // (c0) * (c1 * v) --> (cov) (c0 * c1) * v else if ((details::e_mul == o0) && (details::e_mul == o1)) { exprtk_debug(("(c0) * (c1 * v) --> (cov) (c0 * c1) * v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::mul_op<Type> > >(c0 * c1, v); } // (c0) * (c1 / v) --> (cov) (c0 * c1) / v else if ((details::e_mul == o0) && (details::e_div == o1)) { exprtk_debug(("(c0) * (c1 / v) --> (cov) (c0 * c1) / v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::div_op<Type> > >(c0 * c1, v); } // (c0) / (c1 * v) --> (cov) (c0 / c1) / v else if ((details::e_div == o0) && (details::e_mul == o1)) { exprtk_debug(("(c0) / (c1 * v) --> (cov) (c0 / c1) / v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::div_op<Type> > >(c0 / c1, v); } // (c0) / (c1 / v) --> (cov) (c0 / c1) * v else if ((details::e_div == o0) && (details::e_div == o1)) { 
exprtk_debug(("(c0) / (c1 / v) --> (cov) (c0 / c1) * v\n")); return expr_gen.node_allocator_-> template allocate_cr<typename details::cov_node<Type,details::mul_op<Type> > >(c0 / c1, v); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<ctype, ctype, vtype> (expr_gen, id(expr_gen, o0, o1), c0, c1, v, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), c0, c1, v, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)"); } }; struct synthesize_vococ_expression0 { typedef typename vococ_t::type0 node_type; typedef typename vococ_t::sf3_type sf3_type; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v o0 c0) o1 (c1) const details::voc_base_node<Type>* voc = static_cast<details::voc_base_node<Type>*>(branch[0]); const Type& v = voc->v(); const Type& c0 = voc->c(); const Type& c1 = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = voc->operation(); const details::operator_type o1 = operation; binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (v + c0) + c1 --> (voc) v + (c0 + c1) if ((details::e_add == o0) && (details::e_add == o1)) { exprtk_debug(("(v + c0) + c1 --> (voc) v + (c0 + c1)\n")); return expr_gen.node_allocator_-> 
template allocate_rc<typename details::voc_node<Type,details::add_op<Type> > >(v, c0 + c1); } // (v + c0) - c1 --> (voc) v + (c0 - c1) else if ((details::e_add == o0) && (details::e_sub == o1)) { exprtk_debug(("(v + c0) - c1 --> (voc) v + (c0 - c1)\n")); return expr_gen.node_allocator_-> template allocate_rc<typename details::voc_node<Type,details::add_op<Type> > >(v, c0 - c1); } // (v - c0) + c1 --> (voc) v - (c0 + c1) else if ((details::e_sub == o0) && (details::e_add == o1)) { exprtk_debug(("(v - c0) + c1 --> (voc) v - (c0 + c1)\n")); return expr_gen.node_allocator_-> template allocate_rc<typename details::voc_node<Type,details::add_op<Type> > >(v, c1 - c0); } // (v - c0) - c1 --> (voc) v - (c0 + c1) else if ((details::e_sub == o0) && (details::e_sub == o1)) { exprtk_debug(("(v - c0) - c1 --> (voc) v - (c0 + c1)\n")); return expr_gen.node_allocator_-> template allocate_rc<typename details::voc_node<Type,details::sub_op<Type> > >(v, c0 + c1); } // (v * c0) * c1 --> (voc) v * (c0 * c1) else if ((details::e_mul == o0) && (details::e_mul == o1)) { exprtk_debug(("(v * c0) * c1 --> (voc) v * (c0 * c1)\n")); return expr_gen.node_allocator_-> template allocate_rc<typename details::voc_node<Type,details::mul_op<Type> > >(v, c0 * c1); } // (v * c0) / c1 --> (voc) v * (c0 / c1) else if ((details::e_mul == o0) && (details::e_div == o1)) { exprtk_debug(("(v * c0) / c1 --> (voc) v * (c0 / c1)\n")); return expr_gen.node_allocator_-> template allocate_rc<typename details::voc_node<Type,details::mul_op<Type> > >(v, c0 / c1); } // (v / c0) * c1 --> (voc) v * (c1 / c0) else if ((details::e_div == o0) && (details::e_mul == o1)) { exprtk_debug(("(v / c0) * c1 --> (voc) v * (c1 / c0)\n")); return expr_gen.node_allocator_-> template allocate_rc<typename details::voc_node<Type,details::mul_op<Type> > >(v, c1 / c0); } // (v / c0) / c1 --> (voc) v / (c0 * c1) else if ((details::e_div == o0) && (details::e_div == o1)) { exprtk_debug(("(v / c0) / c1 --> (voc) v / (c0 * c1)\n")); return 
expr_gen.node_allocator_-> template allocate_rc<typename details::voc_node<Type,details::div_op<Type> > >(v, c0 * c1); } // (v ^ c0) ^ c1 --> (voc) v ^ (c0 * c1) else if ((details::e_pow == o0) && (details::e_pow == o1)) { exprtk_debug(("(v ^ c0) ^ c1 --> (voc) v ^ (c0 * c1)\n")); return expr_gen.node_allocator_-> template allocate_rc<typename details::voc_node<Type,details::pow_op<Type> > >(v, c0 * c1); } } const bool synthesis_result = synthesize_sf3ext_expression::template compile<vtype, ctype, ctype> (expr_gen, id(expr_gen, o0, o1), v, c0, c1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v, c0, c1, f0, f1); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t"); } }; struct synthesize_vococ_expression1 { typedef typename vococ_t::type0 node_type; static inline expression_node_ptr process(expression_generator<Type>&, const details::operator_type&, expression_node_ptr (&)[2]) { // (v) o0 (c0 o1 c1) - Not possible. 
// A bare variable combined with a constant-op-constant pair is folded earlier
// by constant folding, so this synthesiser can never be reached legitimately.
exprtk_debug(("(v) o0 (c0 o1 c1) - Not possible.\n"));
return error_node();
}
};

// Synthesises a node for the pattern (v0 o0 v1) o1 (v2 o2 v3): two
// variable-op-variable sub-expressions joined by an outer operator. When
// strength reduction is enabled, selected division-heavy shapes are rewritten
// into algebraically equivalent forms with fewer divisions before falling back
// to a generic four-variable/three-operator node.
struct synthesize_vovovov_expression0
{
   typedef typename vovovov_t::type0 node_type;
   typedef typename vovovov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   // 'operation' is the outer operator o1. branch[0] is the (v0 o0 v1) node
   // and branch[1] the (v2 o2 v3) node; both are freed here and replaced by
   // the synthesised node, or error_node() on failure.
   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // (v0 o0 v1) o1 (v2 o2 v3)
      const details::vov_base_node<Type>* vov0 = static_cast<details::vov_base_node<Type>*>(branch[0]);
      const details::vov_base_node<Type>* vov1 = static_cast<details::vov_base_node<Type>*>(branch[1]);
      // References to the underlying variables remain valid after the source
      // nodes are freed below: v0..v3 alias external variable storage.
      const Type& v0 = vov0->v0();
      const Type& v1 = vov0->v1();
      const Type& v2 = vov1->v0();
      const Type& v3 = vov1->v1();
      const details::operator_type o0 = vov0->operation();
      const details::operator_type o1 = operation;
      const details::operator_type o2 = vov1->operation();
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0);

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      if (expr_gen.parser_->settings_.strength_reduction_enabled())
      {
         // (v0 / v1) * (v2 / v3) --> (vovovov) (v0 * v2) / (v1 * v3)
         if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf4ext_expression::
                  template compile<vtype,vtype,vtype,vtype>(expr_gen, "(t*t)/(t*t)", v0, v2, v1, v3, result);
            exprtk_debug(("(v0 / v1) * (v2 / v3) --> (vovovov) (v0 * v2) / (v1 * v3)\n"));
            return (synthesis_result) ? result : error_node();
         }
         // (v0 / v1) / (v2 / v3) --> (vovovov) (v0 * v3) / (v1 * v2)
         else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf4ext_expression::
                  template compile<vtype,vtype,vtype,vtype>(expr_gen, "(t*t)/(t*t)", v0, v3, v1, v2, result);
            exprtk_debug(("(v0 / v1) / (v2 / v3) --> (vovovov) (v0 * v3) / (v1 * v2)\n"));
            return (synthesis_result) ? result : error_node();
         }
         // (v0 + v1) / (v2 / v3) --> (vovovov) (v0 + v1) * (v3 / v2)
         else if ((details::e_add == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf4ext_expression::
                  template compile<vtype,vtype,vtype,vtype>(expr_gen, "(t+t)*(t/t)", v0, v1, v3, v2, result);
            exprtk_debug(("(v0 + v1) / (v2 / v3) --> (vovovov) (v0 + v1) * (v3 / v2)\n"));
            return (synthesis_result) ? result : error_node();
         }
         // (v0 - v1) / (v2 / v3) --> (vovovov) (v0 - v1) * (v3 / v2)
         // (comment corrected: it previously read "(v0 + v1)"; the code and the
         //  debug trace below both use the subtraction form "(t-t)*(t/t)")
         else if ((details::e_sub == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf4ext_expression::
                  template compile<vtype,vtype,vtype,vtype>(expr_gen, "(t-t)*(t/t)", v0, v1, v3, v2, result);
            exprtk_debug(("(v0 - v1) / (v2 / v3) --> (vovovov) (v0 - v1) * (v3 / v2)\n"));
            return (synthesis_result) ? result : error_node();
         }
         // (v0 * v1) / (v2 / v3) --> (vovovov) ((v0 * v1) * v3) / v2
         else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf4ext_expression::
                  template compile<vtype,vtype,vtype,vtype>(expr_gen, "((t*t)*t)/t", v0, v1, v3, v2, result);
            exprtk_debug(("(v0 * v1) / (v2 / v3) --> (vovovov) ((v0 * v1) * v3) / v2\n"));
            return (synthesis_result) ? result : error_node();
         }
      }

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, v3,result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();
      else if (!expr_gen.valid_operator(o1,f1))
         return error_node();
      else if (!expr_gen.valid_operator(o2,f2))
         return error_node();
      else
         return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, v3, f0, f1, f2);
   }

   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)");
   }
};

// Synthesises a node for the pattern (v0 o0 v1) o1 (v2 o2 c): a
// variable-op-variable sub-expression joined with a variable-op-constant one.
struct synthesize_vovovoc_expression0
{
   typedef typename vovovoc_t::type0 node_type;
   typedef typename vovovoc_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // (v0 o0 v1) o1 (v2 o2 c)
      const details::vov_base_node<Type>* vov = static_cast<details::vov_base_node<Type>*>(branch[0]);
      const details::voc_base_node<Type>* voc = static_cast<details::voc_base_node<Type>*>(branch[1]);
      const Type& v0 = vov->v0();
      const Type& v1 = vov->v1();
      const Type& v2 = voc->v ();
      const Type c = voc->c ();
      const details::operator_type o0 = vov->operation();
      const details::operator_type o1 = operation;
      const details::operator_type o2 = voc->operation();
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0);

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (v0 / v1) * (v2 / c) --> (vovovoc) (v0 * v2) / (v1 * c) if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<vtype,vtype,vtype,ctype>(expr_gen, "(t*t)/(t*t)", v0, v2, v1, c, result); exprtk_debug(("(v0 / v1) * (v2 / c) --> (vovovoc) (v0 * v2) / (v1 * c)\n")); return (synthesis_result) ? result : error_node(); } // (v0 / v1) / (v2 / c) --> (vocovov) (v0 * c) / (v1 * v2) if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<vtype,ctype,vtype,vtype>(expr_gen, "(t*t)/(t*t)", v0, c, v1, v2, result); exprtk_debug(("(v0 / v1) / (v2 / c) --> (vocovov) (v0 * c) / (v1 * v2)\n")); return (synthesis_result) ? result : error_node(); } } const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, c, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else if (!expr_gen.valid_operator(o2,f2)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, c, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)"); } }; struct synthesize_vovocov_expression0 { typedef typename vovocov_t::type0 node_type; typedef typename vovocov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; 
typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0 o0 v1) o1 (c o2 v2) const details::vov_base_node<Type>* vov = static_cast<details::vov_base_node<Type>*>(branch[0]); const details::cov_base_node<Type>* cov = static_cast<details::cov_base_node<Type>*>(branch[1]); const Type& v0 = vov->v0(); const Type& v1 = vov->v1(); const Type& v2 = cov->v (); const Type c = cov->c (); const details::operator_type o0 = vov->operation(); const details::operator_type o1 = operation; const details::operator_type o2 = cov->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (v0 / v1) * (c / v2) --> (vocovov) (v0 * c) / (v1 * v2) if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<vtype,ctype,vtype,vtype>(expr_gen, "(t*t)/(t*t)", v0, c, v1, v2, result); exprtk_debug(("(v0 / v1) * (c / v2) --> (vocovov) (v0 * c) / (v1 * v2)\n")); return (synthesis_result) ? result : error_node(); } // (v0 / v1) / (c / v2) --> (vovovoc) (v0 * v2) / (v1 * c) if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<vtype,vtype,vtype,ctype>(expr_gen, "(t*t)/(t*t)", v0, v2, v1, c, result); exprtk_debug(("(v0 / v1) / (c / v2) --> (vovovoc) (v0 * v2) / (v1 * c)\n")); return (synthesis_result) ? 
result : error_node(); } } const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, c, v2, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else if (!expr_gen.valid_operator(o2,f2)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, c, v2, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)"); } }; struct synthesize_vocovov_expression0 { typedef typename vocovov_t::type0 node_type; typedef typename vocovov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0 o0 c) o1 (v1 o2 v2) const details::voc_base_node<Type>* voc = static_cast<details::voc_base_node<Type>*>(branch[0]); const details::vov_base_node<Type>* vov = static_cast<details::vov_base_node<Type>*>(branch[1]); const Type c = voc->c (); const Type& v0 = voc->v (); const Type& v1 = vov->v0(); const Type& v2 = vov->v1(); const details::operator_type o0 = voc->operation(); const details::operator_type o1 = operation; const details::operator_type o2 = vov->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); 
details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (v0 / c) * (v1 / v2) --> (vovocov) (v0 * v1) / (c * v2) if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<vtype,vtype,ctype,vtype>(expr_gen, "(t*t)/(t*t)", v0, v1, c, v2, result); exprtk_debug(("(v0 / c) * (v1 / v2) --> (vovocov) (v0 * v1) / (c * v2)\n")); return (synthesis_result) ? result : error_node(); } // (v0 / c) / (v1 / v2) --> (vovocov) (v0 * v2) / (c * v1) if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<vtype,vtype,ctype,vtype>(expr_gen, "(t*t)/(t*t)", v0, v2, c, v1, result); exprtk_debug(("(v0 / c) / (v1 / v2) --> (vovocov) (v0 * v2) / (c * v1)\n")); return (synthesis_result) ? result : error_node(); } } const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, c, v1, v2, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else if (!expr_gen.valid_operator(o2,f2)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), v0, c, v1, v2, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)"); } }; struct synthesize_covovov_expression0 { typedef typename covovov_t::type0 node_type; typedef typename covovov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; 
typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (c o0 v0) o1 (v1 o2 v2) const details::cov_base_node<Type>* cov = static_cast<details::cov_base_node<Type>*>(branch[0]); const details::vov_base_node<Type>* vov = static_cast<details::vov_base_node<Type>*>(branch[1]); const Type c = cov->c (); const Type& v0 = cov->v (); const Type& v1 = vov->v0(); const Type& v2 = vov->v1(); const details::operator_type o0 = cov->operation(); const details::operator_type o1 = operation; const details::operator_type o2 = vov->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (c / v0) * (v1 / v2) --> (covovov) (c * v1) / (v0 * v2) if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<ctype,vtype,vtype,vtype>(expr_gen, "(t*t)/(t*t)", c, v1, v0, v2, result); exprtk_debug(("(c / v0) * (v1 / v2) --> (covovov) (c * v1) / (v0 * v2)\n")); return (synthesis_result) ? result : error_node(); } // (c / v0) / (v1 / v2) --> (covovov) (c * v2) / (v0 * v1) if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<ctype,vtype,vtype,vtype>(expr_gen, "(t*t)/(t*t)", c, v2, v0, v1, result); exprtk_debug(("(c / v0) / (v1 / v2) --> (covovov) (c * v2) / (v0 * v1)\n")); return (synthesis_result) ? 
result : error_node();
   }
}

// No strength-reduced form matched above: first try a specialised sf4ext
// node keyed by the "(t o0 t) o1 (t o2 t)" template string; failing that,
// resolve the three binary functors and build the generic 4-operand node.
const bool synthesis_result =
   synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
      (expr_gen, id(expr_gen, o0, o1, o2), c, v0, v1, v2, result);

if (synthesis_result)
   return result;
else if (!expr_gen.valid_operator(o0,f0))
   return error_node();
else if (!expr_gen.valid_operator(o1,f1))
   return error_node();
else if (!expr_gen.valid_operator(o2,f2))
   return error_node();
else
   return node_type::allocate(*(expr_gen.node_allocator_), c, v0, v1, v2, f0, f1, f2);
}

// Template-string key identifying this operand/operator layout,
// e.g. "(t+t)*(t-t)"; used to select a specialised implementation.
static inline std::string id(expression_generator<Type>& expr_gen,
                             const details::operator_type o0,
                             const details::operator_type o1,
                             const details::operator_type o2)
{
   return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)");
}
};

// Synthesiser for "(c0 o0 v0) o1 (c1 o2 v1)" (covocov, layout 0).
// When strength reduction is enabled, the two constants are folded into a
// single constant where algebraically valid, yielding a cheaper 3-operand
// (covov) node instead of the full 4-operand form.
struct synthesize_covocov_expression0
{
   typedef typename covocov_t::type0 node_type;
   typedef typename covocov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // (c0 o0 v0) o1 (c1 o2 v1)
      const details::cov_base_node<Type>* cov0 = static_cast<details::cov_base_node<Type>*>(branch[0]);
      const details::cov_base_node<Type>* cov1 = static_cast<details::cov_base_node<Type>*>(branch[1]);

      // Constants are copied, variables are kept by reference: the branch
      // nodes are freed below, so only externally-owned variable storage
      // may be referenced afterwards.
      const Type  c0 = cov0->c();
      const Type& v0 = cov0->v();
      const Type  c1 = cov1->c();
      const Type& v1 = cov1->v();

      const details::operator_type o0 = cov0->operation();
      const details::operator_type o1 = operation;
      const details::operator_type o2 = cov1->operation();

      // Functor slots start null; they are populated by valid_operator()
      // only on the generic fallback path.
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0);

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      if (expr_gen.parser_->settings_.strength_reduction_enabled())
      {
         // (c0 + v0) + (c1 + v1) --> (covov) (c0 + c1) + v0 + v1
         if ((details::e_add == o0) && (details::e_add == o1) && (details::e_add == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t+t)+t", (c0 + c1), v0, v1, result);

            exprtk_debug(("(c0 + v0) + (c1 + v1) --> (covov) (c0 + c1) + v0 + v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 + v0) - (c1 + v1) --> (covov) (c0 - c1) + v0 - v1
         else if ((details::e_add == o0) && (details::e_sub == o1) && (details::e_add == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t+t)-t", (c0 - c1), v0, v1, result);

            exprtk_debug(("(c0 + v0) - (c1 + v1) --> (covov) (c0 - c1) + v0 - v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 - v0) - (c1 - v1) --> (covov) (c0 - c1) - v0 + v1
         else if ((details::e_sub == o0) && (details::e_sub == o1) && (details::e_sub == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t-t)+t", (c0 - c1), v0, v1, result);

            exprtk_debug(("(c0 - v0) - (c1 - v1) --> (covov) (c0 - c1) - v0 + v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 * v0) * (c1 * v1) --> (covov) (c0 * c1) * v0 * v1
         else if ((details::e_mul == o0) && (details::e_mul == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)*t", (c0 * c1), v0, v1, result);

            exprtk_debug(("(c0 * v0) * (c1 * v1) --> (covov) (c0 * c1) * v0 * v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 * v0) / (c1 * v1) --> (covov) (c0 / c1) * (v0 / v1)
         else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", (c0 / c1), v0, v1, result);

            exprtk_debug(("(c0 * v0) / (c1 * v1) --> (covov) (c0 / c1) * (v0 / v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 / v0) * (c1 / v1) --> (covov) (c0 * c1) / (v0 * v1)
         else if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "t/(t*t)", (c0 * c1), v0, v1, result);

            exprtk_debug(("(c0 / v0) * (c1 / v1) --> (covov) (c0 * c1) / (v0 * v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 / v0) / (c1 / v1) --> (covov) ((c0 / c1) * v1) / v0
         // NOTE: operand order is v1, v0 here (v1 multiplies the folded constant).
         else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", (c0 / c1), v1, v0, result);

            exprtk_debug(("(c0 / v0) / (c1 / v1) --> (covov) ((c0 / c1) * v1) / v0\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 * v0) / (c1 / v1) --> (covov) (c0 / c1) * (v0 * v1)
         else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "t*(t*t)", (c0 / c1), v0, v1, result);

            exprtk_debug(("(c0 * v0) / (c1 / v1) --> (covov) (c0 / c1) * (v0 * v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 / v0) / (c1 * v1) --> (covov) (c0 / c1) / (v0 * v1)
         else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "t/(t*t)", (c0 / c1), v0, v1, result);

            exprtk_debug(("(c0 / v0) / (c1 * v1) --> (covov) (c0 / c1) / (v0 * v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c * v0) +/- (c * v1) --> (covov) c * (v0 +/- v1)
         // Applies only when the two constants compare equal.
         else if (
                   (std::equal_to<T>()(c0,c1)) &&
                   (details::e_mul == o0)      &&
                   (details::e_mul == o2)      &&
                   (
                     (details::e_add == o1) ||
                     (details::e_sub == o1)
                   )
                 )
         {
            std::string specfunc;

            switch (o1)
            {
               case details::e_add : specfunc = "t*(t+t)"; break;
               case details::e_sub : specfunc = "t*(t-t)"; break;
               default             : return error_node();
            }

            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype, vtype, vtype>(expr_gen, specfunc, c0, v0, v1, result);

            exprtk_debug(("(c * v0) +/- (c * v1) --> (covov) c * (v0 +/- v1)\n"));

            return (synthesis_result) ?
result : error_node(); } } const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, c1, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); else if (!expr_gen.valid_operator(o1,f1)) return error_node(); else if (!expr_gen.valid_operator(o2,f2)) return error_node(); else return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, c1, v1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)"); } }; struct synthesize_vocovoc_expression0 { typedef typename vocovoc_t::type0 node_type; typedef typename vocovoc_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0 o0 c0) o1 (v1 o2 c1) const details::voc_base_node<Type>* voc0 = static_cast<details::voc_base_node<Type>*>(branch[0]); const details::voc_base_node<Type>* voc1 = static_cast<details::voc_base_node<Type>*>(branch[1]); const Type c0 = voc0->c(); const Type& v0 = voc0->v(); const Type c1 = voc1->c(); const Type& v1 = voc1->v(); const details::operator_type o0 = voc0->operation(); const details::operator_type o1 = operation; const details::operator_type o2 = voc1->operation(); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); 
details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); if (expr_gen.parser_->settings_.strength_reduction_enabled()) { // (v0 + c0) + (v1 + c1) --> (covov) (c0 + c1) + v0 + v1 if ((details::e_add == o0) && (details::e_add == o1) && (details::e_add == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "(t+t)+t", (c0 + c1), v0, v1, result); exprtk_debug(("(v0 + c0) + (v1 + c1) --> (covov) (c0 + c1) + v0 + v1\n")); return (synthesis_result) ? result : error_node(); } // (v0 + c0) - (v1 + c1) --> (covov) (c0 - c1) + v0 - v1 else if ((details::e_add == o0) && (details::e_sub == o1) && (details::e_add == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "(t+t)-t", (c0 - c1), v0, v1, result); exprtk_debug(("(v0 + c0) - (v1 + c1) --> (covov) (c0 - c1) + v0 - v1\n")); return (synthesis_result) ? result : error_node(); } // (v0 - c0) - (v1 - c1) --> (covov) (c1 - c0) + v0 - v1 else if ((details::e_sub == o0) && (details::e_sub == o1) && (details::e_sub == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "(t+t)-t", (c1 - c0), v0, v1, result); exprtk_debug(("(v0 - c0) - (v1 - c1) --> (covov) (c1 - c0) + v0 - v1\n")); return (synthesis_result) ? result : error_node(); } // (v0 * c0) * (v1 * c1) --> (covov) (c0 * c1) * v0 * v1 else if ((details::e_mul == o0) && (details::e_mul == o1) && (details::e_mul == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "(t*t)*t", (c0 * c1), v0, v1, result); exprtk_debug(("(v0 * c0) * (v1 * c1) --> (covov) (c0 * c1) * v0 * v1\n")); return (synthesis_result) ? 
result : error_node(); } // (v0 * c0) / (v1 * c1) --> (covov) (c0 / c1) * (v0 / v1) else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_mul == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", (c0 / c1), v0, v1, result); exprtk_debug(("(v0 * c0) / (v1 * c1) --> (covov) (c0 / c1) * (v0 / v1)\n")); return (synthesis_result) ? result : error_node(); } // (v0 / c0) * (v1 / c1) --> (covov) (1 / (c0 * c1)) * v0 * v1 else if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "(t*t)*t", Type(1) / (c0 * c1), v0, v1, result); exprtk_debug(("(v0 / c0) * (v1 / c1) --> (covov) (1 / (c0 * c1)) * v0 * v1\n")); return (synthesis_result) ? result : error_node(); } // (v0 / c0) / (v1 / c1) --> (covov) ((c1 / c0) * v0) / v1 else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", (c1 / c0), v0, v1, result); exprtk_debug(("(v0 / c0) / (v1 / c1) --> (covov) ((c1 / c0) * v0) / v1\n")); return (synthesis_result) ? result : error_node(); } // (v0 * c0) / (v1 / c1) --> (covov) (c0 * c1) * (v0 / v1) else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_div == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "t*(t/t)", (c0 * c1), v0, v1, result); exprtk_debug(("(v0 * c0) / (v1 / c1) --> (covov) (c0 * c1) * (v0 / v1)\n")); return (synthesis_result) ? 
result : error_node(); } // (v0 / c0) / (v1 * c1) --> (covov) (1 / (c0 * c1)) * v0 / v1 else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_mul == o2)) { const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, "t*(t/t)", Type(1) / (c0 * c1), v0, v1, result); exprtk_debug(("(v0 / c0) / (v1 * c1) --> (covov) (1 / (c0 * c1)) * v0 / v1\n")); return (synthesis_result) ? result : error_node(); } // (v0 / c0) * (v1 + c1) --> (vocovoc) (v0 * (1 / c0)) * (v1 + c1) else if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_add == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<vtype,ctype,vtype,ctype>(expr_gen, "(t*t)*(t+t)", v0, T(1) / c0, v1, c1, result); exprtk_debug(("(v0 / c0) * (v1 + c1) --> (vocovoc) (v0 * (1 / c0)) * (v1 + c1)\n")); return (synthesis_result) ? result : error_node(); } // (v0 / c0) * (v1 - c1) --> (vocovoc) (v0 * (1 / c0)) * (v1 - c1) else if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_sub == o2)) { const bool synthesis_result = synthesize_sf4ext_expression:: template compile<vtype,ctype,vtype,ctype>(expr_gen, "(t*t)*(t-t)", v0, T(1) / c0, v1, c1, result); exprtk_debug(("(v0 / c0) * (v1 - c1) --> (vocovoc) (v0 * (1 / c0)) * (v1 - c1)\n")); return (synthesis_result) ? result : error_node(); } // (v0 * c) +/- (v1 * c) --> (covov) c * (v0 +/- v1) else if ( (std::equal_to<T>()(c0,c1)) && (details::e_mul == o0) && (details::e_mul == o2) && ( (details::e_add == o1) || (details::e_sub == o1) ) ) { std::string specfunc; switch (o1) { case details::e_add : specfunc = "t*(t+t)"; break; case details::e_sub : specfunc = "t*(t-t)"; break; default : return error_node(); } const bool synthesis_result = synthesize_sf3ext_expression:: template compile<ctype,vtype,vtype>(expr_gen, specfunc, c0, v0, v1, result); exprtk_debug(("(v0 * c) +/- (v1 * c) --> (covov) c * (v0 +/- v1)\n")); return (synthesis_result) ? 
result : error_node();
}
// (v0 / c) +/- (v1 / c) --> (vovoc) (v0 +/- v1) / c
// Applies only when the two constants compare equal.
else if (
          (std::equal_to<T>()(c0,c1)) &&
          (details::e_div == o0)      &&
          (details::e_div == o2)      &&
          (
            (details::e_add == o1) ||
            (details::e_sub == o1)
          )
        )
{
   std::string specfunc;

   switch (o1)
   {
      case details::e_add : specfunc = "(t+t)/t"; break;
      case details::e_sub : specfunc = "(t-t)/t"; break;
      default             : return error_node();
   }

   const bool synthesis_result =
      synthesize_sf3ext_expression::
         template compile<vtype,vtype,ctype>(expr_gen, specfunc, v0, v1, c0, result);

   exprtk_debug(("(v0 / c) +/- (v1 / c) --> (vovoc) (v0 +/- v1) / c\n"));

   return (synthesis_result) ? result : error_node();
}
}

// Fallback for vocovoc layout 0: specialised sf4ext node, else generic node.
const bool synthesis_result =
   synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
      (expr_gen, id(expr_gen, o0, o1, o2), v0, c0, v1, c1, result);

if (synthesis_result)
   return result;
else if (!expr_gen.valid_operator(o0,f0))
   return error_node();
else if (!expr_gen.valid_operator(o1,f1))
   return error_node();
else if (!expr_gen.valid_operator(o2,f2))
   return error_node();
else
   return node_type::allocate(*(expr_gen.node_allocator_), v0, c0, v1, c1, f0, f1, f2);
}

// Template-string key for the "(t o0 t) o1 (t o2 t)" layout.
static inline std::string id(expression_generator<Type>& expr_gen,
                             const details::operator_type o0,
                             const details::operator_type o1,
                             const details::operator_type o2)
{
   return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)");
}
};

// Synthesiser for "(c0 o0 v0) o1 (v1 o2 c1)" (covovoc, layout 0).
// Folds the two constants where algebraically valid, producing a cheaper
// 3-operand node when strength reduction is enabled.
struct synthesize_covovoc_expression0
{
   typedef typename covovoc_t::type0 node_type;
   typedef typename covovoc_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // (c0 o0 v0) o1 (v1 o2 c1)
      const details::cov_base_node<Type>* cov = static_cast<details::cov_base_node<Type>*>(branch[0]);
      const details::voc_base_node<Type>* voc = static_cast<details::voc_base_node<Type>*>(branch[1]);

      // Copy constants, reference variables (branches are freed below).
      const Type  c0 = cov->c();
      const Type& v0 = cov->v();
      const Type  c1 = voc->c();
      const Type& v1 = voc->v();

      const details::operator_type o0 = cov->operation();
      const details::operator_type o1 = operation;
      const details::operator_type o2 = voc->operation();

      // Null functor slots; filled by valid_operator() on the fallback path.
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0);

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      if (expr_gen.parser_->settings_.strength_reduction_enabled())
      {
         // (c0 + v0) + (v1 + c1) --> (covov) (c0 + c1) + v0 + v1
         if ((details::e_add == o0) && (details::e_add == o1) && (details::e_add == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t+t)+t", (c0 + c1), v0, v1, result);

            exprtk_debug(("(c0 + v0) + (v1 + c1) --> (covov) (c0 + c1) + v0 + v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 + v0) - (v1 + c1) --> (covov) (c0 - c1) + v0 - v1
         else if ((details::e_add == o0) && (details::e_sub == o1) && (details::e_add == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t+t)-t", (c0 - c1), v0, v1, result);

            exprtk_debug(("(c0 + v0) - (v1 + c1) --> (covov) (c0 - c1) + v0 - v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 - v0) - (v1 - c1) --> (covov) (c0 + c1) - v0 - v1
         else if ((details::e_sub == o0) && (details::e_sub == o1) && (details::e_sub == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "t-(t+t)", (c0 + c1), v0, v1, result);

            exprtk_debug(("(c0 - v0) - (v1 - c1) --> (covov) (c0 + c1) - v0 - v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 * v0) * (v1 * c1) --> (covov) (c0 * c1) * v0 * v1
         else if ((details::e_mul == o0) && (details::e_mul == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)*t", (c0 * c1), v0, v1, result);

            exprtk_debug(("(c0 * v0) * (v1 * c1) --> (covov) (c0 * c1) * v0 * v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 * v0) / (v1 * c1) --> (covov) (c0 / c1) * (v0 / v1)
         else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", (c0 / c1), v0, v1, result);

            exprtk_debug(("(c0 * v0) / (v1 * c1) --> (covov) (c0 / c1) * (v0 / v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 / v0) * (v1 / c1) --> (covov) (c0 / c1) * (v1 / v0)
         // NOTE: operand order is v1, v0 here (v0 ends up in the divisor).
         else if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "t*(t/t)", (c0 / c1), v1, v0, result);

            exprtk_debug(("(c0 / v0) * (v1 / c1) --> (covov) (c0 / c1) * (v1 / v0)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 / v0) / (v1 / c1) --> (covov) (c0 * c1) / (v0 * v1)
         else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "t/(t*t)", (c0 * c1), v0, v1, result);

            exprtk_debug(("(c0 / v0) / (v1 / c1) --> (covov) (c0 * c1) / (v0 * v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 * v0) / (v1 / c1) --> (covov) (c0 * c1) * (v0 / v1)
         else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", (c0 * c1), v0, v1, result);

            exprtk_debug(("(c0 * v0) / (v1 / c1) --> (covov) (c0 * c1) * (v0 / v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c0 / v0) / (v1 * c1) --> (covov) (c0 / c1) / (v0 * v1)
         else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "t/(t*t)", (c0 / c1), v0, v1, result);

            exprtk_debug(("(c0 / v0) / (v1 * c1) --> (covov) (c0 / c1) / (v0 * v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (c * v0) +/- (v1 * c) --> (covov) c * (v0 +/- v1)
         // Applies only when the two constants compare equal.
         else if (
                   (std::equal_to<T>()(c0,c1)) &&
                   (details::e_mul == o0)      &&
                   (details::e_mul == o2)      &&
                   (
                     (details::e_add == o1) ||
                     (details::e_sub == o1)
                   )
                 )
         {
            std::string specfunc;

            switch (o1)
            {
               case details::e_add : specfunc = "t*(t+t)"; break;
               case details::e_sub : specfunc = "t*(t-t)"; break;
               default             : return error_node();
            }

            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen,specfunc, c0, v0, v1, result);

            exprtk_debug(("(c * v0) +/- (v1 * c) --> (covov) c * (v0 +/- v1)\n"));

            return (synthesis_result) ?
result : error_node();
   }
}

// Fallback for covovoc layout 0: specialised sf4ext node, else generic node.
const bool synthesis_result =
   synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
      (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, v1, c1, result);

if (synthesis_result)
   return result;
else if (!expr_gen.valid_operator(o0,f0))
   return error_node();
else if (!expr_gen.valid_operator(o1,f1))
   return error_node();
else if (!expr_gen.valid_operator(o2,f2))
   return error_node();
else
   return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, v1, c1, f0, f1, f2);
}

// Template-string key for the "(t o0 t) o1 (t o2 t)" layout.
static inline std::string id(expression_generator<Type>& expr_gen,
                             const details::operator_type o0,
                             const details::operator_type o1,
                             const details::operator_type o2)
{
   return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)");
}
};

// Synthesiser for "(v0 o0 c0) o1 (c1 o2 v1)" (vococov, layout 0).
// Folds the two constants where algebraically valid, producing a cheaper
// 3-operand node when strength reduction is enabled.
struct synthesize_vococov_expression0
{
   typedef typename vococov_t::type0 node_type;
   typedef typename vococov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // (v0 o0 c0) o1 (c1 o2 v1)
      const details::voc_base_node<Type>* voc = static_cast<details::voc_base_node<Type>*>(branch[0]);
      const details::cov_base_node<Type>* cov = static_cast<details::cov_base_node<Type>*>(branch[1]);

      // Copy constants, reference variables (branches are freed below).
      const Type  c0 = voc->c();
      const Type& v0 = voc->v();
      const Type  c1 = cov->c();
      const Type& v1 = cov->v();

      const details::operator_type o0 = voc->operation();
      const details::operator_type o1 = operation;
      const details::operator_type o2 = cov->operation();

      // Null functor slots; filled by valid_operator() on the fallback path.
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0);

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      if (expr_gen.parser_->settings_.strength_reduction_enabled())
      {
         // (v0 + c0) + (c1 + v1) --> (covov) (c0 + c1) + v0 + v1
         if ((details::e_add == o0) && (details::e_add == o1) && (details::e_add == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t+t)+t", (c0 + c1), v0, v1, result);

            exprtk_debug(("(v0 + c0) + (c1 + v1) --> (covov) (c0 + c1) + v0 + v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 + c0) - (c1 + v1) --> (covov) (c0 - c1) + v0 - v1
         else if ((details::e_add == o0) && (details::e_sub == o1) && (details::e_add == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t+t)-t", (c0 - c1), v0, v1, result);

            exprtk_debug(("(v0 + c0) - (c1 + v1) --> (covov) (c0 - c1) + v0 - v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 - c0) - (c1 - v1) --> (vovoc) v0 + v1 - (c1 + c0)
         else if ((details::e_sub == o0) && (details::e_sub == o1) && (details::e_sub == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<vtype,vtype,ctype>(expr_gen, "(t+t)-t", v0, v1, (c1 + c0), result);

            exprtk_debug(("(v0 - c0) - (c1 - v1) --> (vovoc) v0 + v1 - (c1 + c0)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 * c0) * (c1 * v1) --> (covov) (c0 * c1) * v0 * v1
         else if ((details::e_mul == o0) && (details::e_mul == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)*t", (c0 * c1), v0, v1, result);

            exprtk_debug(("(v0 * c0) * (c1 * v1) --> (covov) (c0 * c1) * v0 * v1\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 * c0) / (c1 * v1) --> (covov) (c0 / c1) * (v0 * v1)
         else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", (c0 / c1), v0, v1, result);

            exprtk_debug(("(v0 * c0) / (c1 * v1) --> (covov) (c0 / c1) * (v0 * v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 / c0) * (c1 / v1) --> (covov) (c1 / c0) * (v0 / v1)
         else if ((details::e_div == o0) && (details::e_mul == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", (c1 / c0), v0, v1, result);

            exprtk_debug(("(v0 / c0) * (c1 / v1) --> (covov) (c1 / c0) * (v0 / v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 * c0) / (c1 / v1) --> (covov) (c0 / c1) * (v0 * v1)
         else if ((details::e_mul == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)*t", (c0 / c1), v0, v1, result);

            exprtk_debug(("(v0 * c0) / (c1 / v1) --> (covov) (c0 / c1) * (v0 * v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 / c0) / (c1 * v1) --> (covov) (1 / (c0 * c1)) * (v0 / v1)
         else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_mul == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, "(t*t)/t", Type(1) / (c0 * c1), v0, v1, result);

            exprtk_debug(("(v0 / c0) / (c1 * v1) --> (covov) (1 / (c0 * c1)) * (v0 / v1)\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 / c0) / (c1 / v1) --> (vovoc) (v0 * v1) * (1 / (c0 * c1))
         else if ((details::e_div == o0) && (details::e_div == o1) && (details::e_div == o2))
         {
            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<vtype,vtype,ctype>(expr_gen, "(t*t)*t", v0, v1, Type(1) / (c0 * c1), result);

            exprtk_debug(("(v0 / c0) / (c1 / v1) --> (vovoc) (v0 * v1) * (1 / (c0 * c1))\n"));

            return (synthesis_result) ? result : error_node();
         }
         // (v0 * c) +/- (c * v1) --> (covov) c * (v0 +/- v1)
         // Applies only when the two constants compare equal.
         else if (
                   (std::equal_to<T>()(c0,c1)) &&
                   (details::e_mul == o0)      &&
                   (details::e_mul == o2)      &&
                   (
                     (details::e_add == o1) ||
                     (details::e_sub == o1)
                   )
                 )
         {
            std::string specfunc;

            switch (o1)
            {
               case details::e_add : specfunc = "t*(t+t)"; break;
               case details::e_sub : specfunc = "t*(t-t)"; break;
               default             : return error_node();
            }

            const bool synthesis_result =
               synthesize_sf3ext_expression::
                  template compile<ctype,vtype,vtype>(expr_gen, specfunc, c0, v0, v1, result);

            exprtk_debug(("(v0 * c) +/- (c * v1) --> (covov) c * (v0 +/- v1)\n"));

            return (synthesis_result) ?
result : error_node();
   }
}

// Fallback for vococov layout 0: specialised sf4ext node, else generic node.
const bool synthesis_result =
   synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
      (expr_gen, id(expr_gen, o0, o1, o2), v0, c0, c1, v1, result);

if (synthesis_result)
   return result;
else if (!expr_gen.valid_operator(o0,f0))
   return error_node();
else if (!expr_gen.valid_operator(o1,f1))
   return error_node();
else if (!expr_gen.valid_operator(o2,f2))
   return error_node();
else
   return node_type::allocate(*(expr_gen.node_allocator_), v0, c0, c1, v1, f0, f1, f2);
}

// Template-string key for the "(t o0 t) o1 (t o2 t)" layout.
static inline std::string id(expression_generator<Type>& expr_gen,
                             const details::operator_type o0,
                             const details::operator_type o1,
                             const details::operator_type o2)
{
   return (details::build_string() << "(t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t)");
}
};

// Synthesiser for "v0 o0 (v1 o1 (v2 o2 v3))" (vovovov, layout 1: fully
// right-associated). Reuses the functors already resolved by the inner
// vovov node; only o0's functor still needs resolving.
struct synthesize_vovovov_expression1
{
   typedef typename vovovov_t::type1 node_type;
   typedef typename vovovov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 (v1 o1 (v2 o2 v3))
      typedef typename synthesize_vovov_expression1::node_type lcl_vovov_t;

      const lcl_vovov_t* vovov = static_cast<const lcl_vovov_t*>(branch[1]);

      // All four operands are variables held by reference; branch[1] is
      // freed below, so only externally-owned storage is referenced.
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type& v1 = vovov->t0();
      const Type& v2 = vovov->t1();
      const Type& v3 = vovov->t2();

      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vovov->f0());
      const details::operator_type o2 = expr_gen.get_operator(vovov->f1());

      // f1/f2 are taken from the inner node; f0 resolved via valid_operator().
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vovov->f0();
      binary_functor_t f2 = vovov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      if (synthesize_sf4ext_expression::template compile<T0,T1,T2,T3>(expr_gen,id(expr_gen,o0,o1,o2),v0,v1,v2,v3,result))
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 (v1 o1 (v2 o2 v3))\n"));

      return node_type::allocate(*(expr_gen.node_allocator_),v0,v1,v2,v3,f0,f1,f2);
   }

   // Template-string key for the "t o0 (t o1 (t o2 t))" layout.
   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t))");
   }
};

// Synthesiser for "v0 o0 (v1 o1 (v2 o2 c))" (vovovoc, layout 1).
struct synthesize_vovovoc_expression1
{
   typedef typename vovovoc_t::type1 node_type;
   typedef typename vovovoc_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 (v1 o1 (v2 o2 c))
      typedef typename synthesize_vovoc_expression1::node_type lcl_vovoc_t;

      const lcl_vovoc_t* vovoc = static_cast<const lcl_vovoc_t*>(branch[1]);

      // Variables by reference, trailing constant copied by value.
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type& v1 = vovoc->t0();
      const Type& v2 = vovoc->t1();
      const Type   c = vovoc->t2();

      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vovoc->f0());
      const details::operator_type o2 = expr_gen.get_operator(vovoc->f1());

      // f1/f2 reused from the inner node; f0 resolved via valid_operator().
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vovoc->f0();
      binary_functor_t f2 = vovoc->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, c, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 (v1 o1 (v2 o2 c))\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, c, f0, f1, f2);
   }

   // Template-string key for the "t o0 (t o1 (t o2 t))" layout.
   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t))");
   }
};

// Synthesiser for "v0 o0 (v1 o1 (c o2 v2))" (vovocov, layout 1).
struct synthesize_vovocov_expression1
{
   typedef typename vovocov_t::type1 node_type;
   typedef typename vovocov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 (v1 o1 (c o2 v2))
      typedef typename synthesize_vocov_expression1::node_type lcl_vocov_t;

      const lcl_vocov_t* vocov = static_cast<const lcl_vocov_t*>(branch[1]);

      // Variables by reference, embedded constant copied by value.
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type& v1 = vocov->t0();
      const Type   c = vocov->t1();
      const Type& v2 = vocov->t2();

      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vocov->f0());
      const details::operator_type o2 = expr_gen.get_operator(vocov->f1());

      // f1/f2 reused from the inner node; f0 resolved via valid_operator().
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vocov->f0();
      binary_functor_t f2 = vocov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, c, v2, result);

      if (synthesis_result)
         return result;

      if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 (v1 o1 (c o2 v2))\n"));

      return
node_type::allocate(*(expr_gen.node_allocator_), v0, v1, c, v2, f0, f1, f2);
}

// Template-string key for the "t o0 (t o1 (t o2 t))" layout.
static inline std::string id(expression_generator<Type>& expr_gen,
                             const details::operator_type o0,
                             const details::operator_type o1,
                             const details::operator_type o2)
{
   return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t))");
}
};

// Synthesiser for "v0 o0 (c o1 (v1 o2 v2))" (vocovov, layout 1: fully
// right-associated). Reuses functors from the inner covov node.
struct synthesize_vocovov_expression1
{
   typedef typename vocovov_t::type1 node_type;
   typedef typename vocovov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 (c o1 (v1 o2 v2))
      typedef typename synthesize_covov_expression1::node_type lcl_covov_t;

      const lcl_covov_t* covov = static_cast<const lcl_covov_t*>(branch[1]);

      // Variables by reference, constant copied by value (branch[1] is freed).
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type   c = covov->t0();
      const Type& v1 = covov->t1();
      const Type& v2 = covov->t2();

      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(covov->f0());
      const details::operator_type o2 = expr_gen.get_operator(covov->f1());

      // f1/f2 reused from the inner node; f0 resolved via valid_operator().
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = covov->f0();
      binary_functor_t f2 = covov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, c, v1, v2, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 (c o1 (v1 o2 v2))\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), v0, c, v1, v2, f0, f1, f2);
   }

   // Template-string key for the "t o0 (t o1 (t o2 t))" layout.
   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t))");
   }
};

// Synthesiser for "c o0 (v0 o1 (v1 o2 v2))" (covovov, layout 1).
// The leading constant comes from a literal node; both branches are freed.
struct synthesize_covovov_expression1
{
   typedef typename covovov_t::type1 node_type;
   typedef typename covovov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // c o0 (v0 o1 (v1 o2 v2))
      typedef typename synthesize_vovov_expression1::node_type lcl_vovov_t;

      const lcl_vovov_t* vovov = static_cast<const lcl_vovov_t*>(branch[1]);

      // Constant copied from the literal node; variables by reference.
      const Type   c = static_cast<details::literal_node<Type>*>(branch[0])->value();
      const Type& v0 = vovov->t0();
      const Type& v1 = vovov->t1();
      const Type& v2 = vovov->t2();

      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vovov->f0());
      const details::operator_type o2 = expr_gen.get_operator(vovov->f1());

      // f1/f2 reused from the inner node; f0 resolved via valid_operator().
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vovov->f0();
      binary_functor_t f2 = vovov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), c, v0, v1, v2, result);

      if (synthesis_result)
         return result;

      if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("c o0 (v0 o1 (v1 o2 v2))\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), c, v0, v1, v2, f0, f1, f2);
   }

   // Template-string key for the "t o0 (t o1 (t o2 t))" layout.
   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t))");
   }
};

// Synthesiser for "c0 o0 (v0 o1 (c1 o2 v1))" (covocov, layout 1).
struct synthesize_covocov_expression1
{
   typedef typename covocov_t::type1 node_type;
   typedef typename covocov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // c0 o0 (v0 o1 (c1 o2 v1))
      typedef typename synthesize_vocov_expression1::node_type lcl_vocov_t;

      const lcl_vocov_t* vocov = static_cast<const lcl_vocov_t*>(branch[1]);

      // Constants copied (literal node and inner node); variables by reference.
      const Type  c0 = static_cast<details::literal_node<Type>*>(branch[0])->value();
      const Type& v0 = vocov->t0();
      const Type  c1 = vocov->t1();
      const Type& v1 = vocov->t2();

      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vocov->f0());
      const details::operator_type o2 = expr_gen.get_operator(vocov->f1());

      // f1/f2 reused from the inner node; f0 resolved via valid_operator().
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vocov->f0();
      binary_functor_t f2 = vocov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, c1, v1, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("c0 o0 (v0 o1 (c1 o2 v1))\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, c1, v1, f0, f1, f2);
   }

   // Template-string key for the "t o0 (t o1 (t o2 t))" layout.
   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t))");
   }
};

// Synthesiser for "v0 o0 (c0 o1 (v1 o2 c2))" (vocovoc, layout 1).
// NOTE(review): definition continues beyond this view.
struct synthesize_vocovoc_expression1
{
   typedef typename vocovoc_t::type1 node_type;
   typedef typename vocovoc_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 (c0 o1 (v1 o2 c2))
      typedef typename synthesize_covoc_expression1::node_type lcl_covoc_t;

      const lcl_covoc_t* covoc = static_cast<const lcl_covoc_t*>(branch[1]);

      // Variables by reference, constants copied by value.
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type  c0 = covoc->t0();
      const Type& v1 = covoc->t1();
      const Type  c1 = covoc->t2();

      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(covoc->f0());
      const details::operator_type o2 = expr_gen.get_operator(covoc->f1());

      // f1/f2 reused from the inner node; f0 resolved via valid_operator().
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = covoc->f0();
      binary_functor_t f2 = covoc->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, c0, v1, c1, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 (c0 o1 (v1 o2 c2))\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), v0, c0, v1, c1, f0, f1, f2);
   }

   // Template-string key for the "t o0 (t o1 (t o2 t))" layout.
   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" <<
expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t))"); } }; struct synthesize_covovoc_expression1 { typedef typename covovoc_t::type1 node_type; typedef typename covovoc_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // c0 o0 (v0 o1 (v1 o2 c1)) typedef typename synthesize_vovoc_expression1::node_type lcl_vovoc_t; const lcl_vovoc_t* vovoc = static_cast<const lcl_vovoc_t*>(branch[1]); const Type c0 = static_cast<details::literal_node<Type>*>(branch[0])->value(); const Type& v0 = vovoc->t0(); const Type& v1 = vovoc->t1(); const Type c1 = vovoc->t2(); const details::operator_type o0 = operation; const details::operator_type o1 = expr_gen.get_operator(vovoc->f0()); const details::operator_type o2 = expr_gen.get_operator(vovoc->f1()); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = vovoc->f0(); binary_functor_t f2 = vovoc->f1(); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, v1, c1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); exprtk_debug(("c0 o0 (v0 o1 (v1 o2 c1))\n")); return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, v1, c1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << 
"t))"); } }; struct synthesize_vococov_expression1 { typedef typename vococov_t::type1 node_type; typedef typename vococov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // v0 o0 (c0 o1 (c1 o2 v1)) typedef typename synthesize_cocov_expression1::node_type lcl_cocov_t; const lcl_cocov_t* cocov = static_cast<const lcl_cocov_t*>(branch[1]); const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref(); const Type c0 = cocov->t0(); const Type c1 = cocov->t1(); const Type& v1 = cocov->t2(); const details::operator_type o0 = operation; const details::operator_type o1 = expr_gen.get_operator(cocov->f0()); const details::operator_type o2 = expr_gen.get_operator(cocov->f1()); binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0); binary_functor_t f1 = cocov->f0(); binary_functor_t f2 = cocov->f1(); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, c0, c1, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o0,f0)) return error_node(); exprtk_debug(("v0 o0 (c0 o1 (c1 o2 v1))\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, c0, c1, v1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "(t" << expr_gen.to_str(o2) << "t))"); } }; struct synthesize_vovovov_expression2 { typedef typename vovovov_t::type2 node_type; typedef typename 
vovovov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 ((v1 o1 v2) o2 v3)
      typedef typename synthesize_vovov_expression0::node_type lcl_vovov_t;

      const lcl_vovov_t* vovov = static_cast<const lcl_vovov_t*>(branch[1]);
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type& v1 = vovov->t0();
      const Type& v2 = vovov->t1();
      const Type& v3 = vovov->t2();
      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vovov->f0());
      const details::operator_type o2 = expr_gen.get_operator(vovov->f1());

      // f0 is resolved via valid_operator() below; f1/f2 come from the sub-node.
      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vovov->f0();
      binary_functor_t f2 = vovov->f1();

      // Only the composite right branch is freed; branch[0] is a variable node.
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, v3, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 ((v1 o1 v2) o2 v3)\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, v3, f0, f1, f2);
   }

   // Canonical signature string for this nesting shape, e.g. "t+((t*t)-t)".
   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "((t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t)");
   }
};

// Synthesizes 'v0 o0 ((v1 o1 v2) o2 c)': variable (branch[0], not freed)
// combined with a left-nested vovoc sub-expression (branch[1]).
struct synthesize_vovovoc_expression2
{
   typedef typename vovovoc_t::type2 node_type;
   typedef typename vovovoc_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 ((v1 o1 v2) o2 c)
      typedef typename synthesize_vovoc_expression0::node_type lcl_vovoc_t;

      const lcl_vovoc_t* vovoc = static_cast<const lcl_vovoc_t*>(branch[1]);
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type& v1 = vovoc->t0();
      const Type& v2 = vovoc->t1();
      const Type c   = vovoc->t2();
      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vovoc->f0());
      const details::operator_type o2 = expr_gen.get_operator(vovoc->f1());

      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vovoc->f0();
      binary_functor_t f2 = vovoc->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, c, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 ((v1 o1 v2) o2 c)\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, c, f0, f1, f2);
   }

   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "((t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t)");
   }
};

// Synthesizes 'v0 o0 ((v1 o1 c) o2 v2)': variable (branch[0], not freed)
// combined with a left-nested vocov sub-expression (branch[1]).
struct synthesize_vovocov_expression2
{
   typedef typename vovocov_t::type2 node_type;
   typedef typename vovocov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 ((v1 o1 c) o2 v2)
      typedef typename synthesize_vocov_expression0::node_type lcl_vocov_t;

      const lcl_vocov_t* vocov = static_cast<const lcl_vocov_t*>(branch[1]);
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type& v1 = vocov->t0();
      const Type c   = vocov->t1();
      const Type& v2 = vocov->t2();
      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vocov->f0());
      const details::operator_type o2 = expr_gen.get_operator(vocov->f1());

      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vocov->f0();
      binary_functor_t f2 = vocov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, c, v2, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 ((v1 o1 c) o2 v2)\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, c, v2, f0, f1, f2);
   }

   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "((t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t)");
   }
};

// Synthesizes 'v0 o0 ((c o1 v1) o2 v2)': variable (branch[0], not freed)
// combined with a left-nested covov sub-expression (branch[1]).
struct synthesize_vocovov_expression2
{
   typedef typename vocovov_t::type2 node_type;
   typedef typename vocovov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 ((c o1 v1) o2 v2)
      typedef typename synthesize_covov_expression0::node_type lcl_covov_t;

      const lcl_covov_t* covov = static_cast<const lcl_covov_t*>(branch[1]);
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type c   = covov->t0();
      const Type& v1 = covov->t1();
      const Type& v2 = covov->t2();
      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(covov->f0());
      const details::operator_type o2 = expr_gen.get_operator(covov->f1());

      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = covov->f0();
      binary_functor_t f2 = covov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, c, v1, v2, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 ((c o1 v1) o2 v2)\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), v0, c, v1, v2, f0, f1, f2);
   }

   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "((t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t)");
   }
};

// Synthesizes 'c o0 ((v1 o1 v2) o2 v3)': literal (branch[0]) combined with a
// left-nested var/var/var chain (branch[1]). Both source nodes are freed.
struct synthesize_covovov_expression2
{
   typedef typename covovov_t::type2 node_type;
   typedef typename covovov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // c o0 ((v1 o1 v2) o2 v3)
      typedef typename synthesize_vovov_expression0::node_type lcl_vovov_t;

      const lcl_vovov_t* vovov = static_cast<const lcl_vovov_t*>(branch[1]);
      const Type c   = static_cast<details::literal_node<Type>*>(branch[0])->value();
      const Type& v0 = vovov->t0();
      const Type& v1 = vovov->t1();
      const Type& v2 = vovov->t2();
      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vovov->f0());
      const details::operator_type o2 = expr_gen.get_operator(vovov->f1());

      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vovov->f0();
      binary_functor_t f2 = vovov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), c, v0, v1, v2, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("c o0 ((v1 o1 v2) o2 v3)\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), c, v0, v1, v2, f0, f1, f2);
   }

   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "((t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t)");
   }
};

// Synthesizes 'c0 o0 ((v0 o1 c1) o2 v1)': literal (branch[0]) combined with a
// left-nested vocov sub-expression (branch[1]). Both source nodes are freed.
struct synthesize_covocov_expression2
{
   typedef typename covocov_t::type2 node_type;
   typedef typename covocov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // c0 o0 ((v0 o1 c1) o2 v1)
      typedef typename synthesize_vocov_expression0::node_type lcl_vocov_t;

      const lcl_vocov_t* vocov = static_cast<const lcl_vocov_t*>(branch[1]);
      const Type c0  = static_cast<details::literal_node<Type>*>(branch[0])->value();
      const Type& v0 = vocov->t0();
      const Type c1  = vocov->t1();
      const Type& v1 = vocov->t2();
      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vocov->f0());
      const details::operator_type o2 = expr_gen.get_operator(vocov->f1());

      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vocov->f0();
      binary_functor_t f2 = vocov->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, c1, v1, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("c0 o0 ((v0 o1 c1) o2 v1)\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, c1, v1, f0, f1, f2);
   }

   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "((t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t)");
   }
};

// Synthesizes 'v0 o0 ((c0 o1 v1) o2 c1)': variable (branch[0], not freed)
// combined with a left-nested covoc sub-expression (branch[1]).
struct synthesize_vocovoc_expression2
{
   typedef typename vocovoc_t::type2 node_type;
   typedef typename vocovoc_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // v0 o0 ((c0 o1 v1) o2 c1)
      typedef typename synthesize_covoc_expression0::node_type lcl_covoc_t;

      const lcl_covoc_t* covoc = static_cast<const lcl_covoc_t*>(branch[1]);
      const Type& v0 = static_cast<details::variable_node<Type>*>(branch[0])->ref();
      const Type c0  = covoc->t0();
      const Type& v1 = covoc->t1();
      const Type c1  = covoc->t2();
      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(covoc->f0());
      const details::operator_type o2 = expr_gen.get_operator(covoc->f1());

      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = covoc->f0();
      binary_functor_t f2 = covoc->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), v0, c0, v1, c1, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("v0 o0 ((c0 o1 v1) o2 c1)\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), v0, c0, v1, c1, f0, f1, f2);
   }

   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "((t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t)");
   }
};

// Synthesizes 'c0 o0 ((v0 o1 v1) o2 c1)': literal (branch[0]) combined with a
// left-nested vovoc sub-expression (branch[1]). Both source nodes are freed.
struct synthesize_covovoc_expression2
{
   typedef typename covovoc_t::type2 node_type;
   typedef typename covovoc_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3 T3;

   static inline expression_node_ptr process(expression_generator<Type>& expr_gen,
                                             const details::operator_type& operation,
                                             expression_node_ptr (&branch)[2])
   {
      // c0 o0 ((v0 o1 v1) o2 c1)
      typedef typename synthesize_vovoc_expression0::node_type lcl_vovoc_t;

      const lcl_vovoc_t* vovoc = static_cast<const lcl_vovoc_t*>(branch[1]);
      const Type c0  = static_cast<details::literal_node<Type>*>(branch[0])->value();
      const Type& v0 = vovoc->t0();
      const Type& v1 = vovoc->t1();
      const Type c1  = vovoc->t2();
      const details::operator_type o0 = operation;
      const details::operator_type o1 = expr_gen.get_operator(vovoc->f0());
      const details::operator_type o2 = expr_gen.get_operator(vovoc->f1());

      binary_functor_t f0 = reinterpret_cast<binary_functor_t>(0);
      binary_functor_t f1 = vovoc->f0();
      binary_functor_t f2 = vovoc->f1();

      details::free_node(*(expr_gen.node_allocator_),branch[0]);
      details::free_node(*(expr_gen.node_allocator_),branch[1]);

      expression_node_ptr result = error_node();

      const bool synthesis_result =
         synthesize_sf4ext_expression::template compile<T0, T1, T2, T3>
            (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, v1, c1, result);

      if (synthesis_result)
         return result;
      else if (!expr_gen.valid_operator(o0,f0))
         return error_node();

      exprtk_debug(("c0 o0 ((v0 o1 v1) o2 c1)\n"));

      return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, v1, c1, f0, f1, f2);
   }

   static inline std::string id(expression_generator<Type>& expr_gen,
                                const details::operator_type o0,
                                const details::operator_type o1,
                                const details::operator_type o2)
   {
      return (details::build_string() << "t" << expr_gen.to_str(o0) << "((t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t)");
   }
};

// Stub synthesizer for 'v0 o0 ((c0 o1 c1) o2 v1)'. NOTE(review): presumably
// unreachable because a purely-constant sub-expression (c0 o1 c1) would have
// been folded earlier — confirm against the constant-folding path. It only
// logs and reports failure.
struct synthesize_vococov_expression2
{
   typedef typename vococov_t::type2 node_type;

   static inline expression_node_ptr process(expression_generator<Type>&, const details::operator_type&, expression_node_ptr (&)[2])
   {
      // v0 o0 ((c0 o1 c1) o2 v1) - Not possible
      exprtk_debug(("v0 o0 ((c0 o1 c1) o2 v1) - Not possible\n"));
      return error_node();
   }

   static inline std::string id(expression_generator<Type>&, const details::operator_type, const details::operator_type, const details::operator_type)
   {
      return "INVALID";
   }
};

// Synthesizes '((v0 o0 v1) o1 v2) o2 v3': a left-nested var/var/var chain
// (branch[0]) combined with a trailing variable ('type3' nesting shape).
struct synthesize_vovovov_expression3
{
   typedef typename vovovov_t::type3 node_type;
   typedef typename vovovov_t::sf4_type sf4_type;
   typedef typename node_type::T0 T0;
   typedef typename node_type::T1 T1;
   typedef typename node_type::T2 T2;
   typedef typename node_type::T3
T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 v1) o1 v2) o2 v3 typedef typename synthesize_vovov_expression0::node_type lcl_vovov_t; const lcl_vovov_t* vovov = static_cast<const lcl_vovov_t*>(branch[0]); const Type& v0 = vovov->t0(); const Type& v1 = vovov->t1(); const Type& v2 = vovov->t2(); const Type& v3 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(vovov->f0()); const details::operator_type o1 = expr_gen.get_operator(vovov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vovov->f0(); binary_functor_t f1 = vovov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, v3, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 v1) o1 v2) o2 v3\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, v3, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vovovoc_expression3 { typedef typename vovovoc_t::type3 node_type; typedef typename vovovoc_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& 
operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 v1) o1 v2) o2 c typedef typename synthesize_vovov_expression0::node_type lcl_vovov_t; const lcl_vovov_t* vovov = static_cast<const lcl_vovov_t*>(branch[0]); const Type& v0 = vovov->t0(); const Type& v1 = vovov->t1(); const Type& v2 = vovov->t2(); const Type c = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = expr_gen.get_operator(vovov->f0()); const details::operator_type o1 = expr_gen.get_operator(vovov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vovov->f0(); binary_functor_t f1 = vovov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, c, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 v1) o1 v2) o2 c\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, c, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vovocov_expression3 { typedef typename vovocov_t::type3 node_type; typedef typename vovocov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 v1) o1 
c) o2 v2 typedef typename synthesize_vovoc_expression0::node_type lcl_vovoc_t; const lcl_vovoc_t* vovoc = static_cast<const lcl_vovoc_t*>(branch[0]); const Type& v0 = vovoc->t0(); const Type& v1 = vovoc->t1(); const Type c = vovoc->t2(); const Type& v2 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(vovoc->f0()); const details::operator_type o1 = expr_gen.get_operator(vovoc->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vovoc->f0(); binary_functor_t f1 = vovoc->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, c, v2, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 v1) o1 c) o2 v2\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, c, v2, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vocovov_expression3 { typedef typename vocovov_t::type3 node_type; typedef typename vocovov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 c) o1 v1) o2 v2 typedef typename synthesize_vocov_expression0::node_type lcl_vocov_t; const lcl_vocov_t* vocov = static_cast<const 
// NOTE(review): exprtk "expression3" composite synthesizers, pattern legend:
// v = variable reference, c = constant, oN = binary operator. Each process()
// unpacks an already-built trinary node from branch[0], frees it, tries the
// generic sf4ext synthesis path first, and otherwise allocates the
// specialised four-operand node_type directly. f2 starts as a null
// binary_functor_t; valid_operator(o2, f2) presumably populates it on
// success -- confirm against expression_generator::valid_operator().
lcl_vocov_t*>(branch[0]); const Type& v0 = vocov->t0(); const Type c = vocov->t1(); const Type& v1 = vocov->t2(); const Type& v2 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(vocov->f0()); const details::operator_type o1 = expr_gen.get_operator(vocov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vocov->f0(); binary_functor_t f1 = vocov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, c, v1, v2, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 c) o1 v1) o2 v2\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, c, v1, v2, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_covovov_expression3 { typedef typename covovov_t::type3 node_type; typedef typename covovov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((c o0 v0) o1 v1) o2 v2 typedef typename synthesize_covov_expression0::node_type lcl_covov_t; const lcl_covov_t* covov = static_cast<const lcl_covov_t*>(branch[0]); const Type c = covov->t0(); const Type& v0 = covov->t1(); const Type& v1 = covov->t2(); const
Type& v2 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(covov->f0()); const details::operator_type o1 = expr_gen.get_operator(covov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = covov->f0(); binary_functor_t f1 = covov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), c, v0, v1, v2, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((c o0 v0) o1 v1) o2 v2\n")); return node_type::allocate(*(expr_gen.node_allocator_), c, v0, v1, v2, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_covocov_expression3 { typedef typename covocov_t::type3 node_type; typedef typename covocov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((c0 o0 v0) o1 c1) o2 v1 typedef typename synthesize_covoc_expression0::node_type lcl_covoc_t; const lcl_covoc_t* covoc = static_cast<const lcl_covoc_t*>(branch[0]); const Type c0 = covoc->t0(); const Type& v0 = covoc->t1(); const Type c1 = covoc->t2(); const Type& v1 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 =
expr_gen.get_operator(covoc->f0()); const details::operator_type o1 = expr_gen.get_operator(covoc->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = covoc->f0(); binary_functor_t f1 = covoc->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, c1, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((c0 o0 v0) o1 c1) o2 v1\n")); return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, c1, v1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vocovoc_expression3 { typedef typename vocovoc_t::type3 node_type; typedef typename vocovoc_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 c0) o1 v1) o2 c1 typedef typename synthesize_vocov_expression0::node_type lcl_vocov_t; const lcl_vocov_t* vocov = static_cast<const lcl_vocov_t*>(branch[0]); const Type& v0 = vocov->t0(); const Type c0 = vocov->t1(); const Type& v1 = vocov->t2(); const Type c1 = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = expr_gen.get_operator(vocov->f0()); const details::operator_type o1 = expr_gen.get_operator(vocov->f1()); const
// NOTE(review): continuation of the "expression3" synthesizers (vocovoc,
// covovoc, vococov variants). Variants whose branch[1] is a literal_node
// copy its value and free branch[1] as well as branch[0]; the
// variable-reference variants free only branch[0] and keep referring to the
// variable's storage by reference.
details::operator_type o2 = operation; binary_functor_t f0 = vocov->f0(); binary_functor_t f1 = vocov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, c0, v1, c1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 c0) o1 v1) o2 c1\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, c0, v1, c1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_covovoc_expression3 { typedef typename covovoc_t::type3 node_type; typedef typename covovoc_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((c0 o0 v0) o1 v1) o2 c1 typedef typename synthesize_covov_expression0::node_type lcl_covov_t; const lcl_covov_t* covov = static_cast<const lcl_covov_t*>(branch[0]); const Type c0 = covov->t0(); const Type& v0 = covov->t1(); const Type& v1 = covov->t2(); const Type c1 = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = expr_gen.get_operator(covov->f0()); const details::operator_type o1 = expr_gen.get_operator(covov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 =
covov->f0(); binary_functor_t f1 = covov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, v1, c1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((c0 o0 v0) o1 v1) o2 c1\n")); return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, v1, c1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vococov_expression3 { typedef typename vococov_t::type3 node_type; typedef typename vococov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 c0) o1 c1) o2 v1 typedef typename synthesize_vococ_expression0::node_type lcl_vococ_t; const lcl_vococ_t* vococ = static_cast<const lcl_vococ_t*>(branch[0]); const Type& v0 = vococ->t0(); const Type c0 = vococ->t1(); const Type c1 = vococ->t2(); const Type& v1 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(vococ->f0()); const details::operator_type o1 = expr_gen.get_operator(vococ->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vococ->f0(); binary_functor_t f1 = vococ->f1();
binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, c0, c1, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 c0) o1 c1) o2 v1\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, c0, c1, v1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "((t" << expr_gen.to_str(o0) << "t)" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vovovov_expression4 { typedef typename vovovov_t::type4 node_type; typedef typename vovovov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // (v0 o0 (v1 o1 v2)) o2 v3 typedef typename synthesize_vovov_expression1::node_type lcl_vovov_t; const lcl_vovov_t* vovov = static_cast<const lcl_vovov_t*>(branch[0]); const Type& v0 = vovov->t0(); const Type& v1 = vovov->t1(); const Type& v2 = vovov->t2(); const Type& v3 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(vovov->f0()); const details::operator_type o1 = expr_gen.get_operator(vovov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vovov->f0(); binary_functor_t f1 = vovov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0);
// NOTE(review): "expression4" synthesizers handle the right-associated
// shapes (x o0 (y o1 z)) o2 w -- branch[0] holds an "expression1"-style
// node whose right operand was itself composite. Same strategy as the
// expression3 family: attempt sf4ext compilation keyed by the id() string,
// then fall back to allocating the specialised node_type.
details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, v3, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("(v0 o0 (v1 o1 v2)) o2 v3\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, v3, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vovovoc_expression4 { typedef typename vovovoc_t::type4 node_type; typedef typename vovovoc_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 (v1 o1 v2)) o2 c) typedef typename synthesize_vovov_expression1::node_type lcl_vovov_t; const lcl_vovov_t* vovov = static_cast<const lcl_vovov_t*>(branch[0]); const Type& v0 = vovov->t0(); const Type& v1 = vovov->t1(); const Type& v2 = vovov->t2(); const Type c = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = expr_gen.get_operator(vovov->f0()); const details::operator_type o1 = expr_gen.get_operator(vovov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vovov->f0(); binary_functor_t f1 = vovov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]);
expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, v2, c, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 (v1 o1 v2)) o2 c)\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, v2, c, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vovocov_expression4 { typedef typename vovocov_t::type4 node_type; typedef typename vovocov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 (v1 o1 c)) o2 v1) typedef typename synthesize_vovoc_expression1::node_type lcl_vovoc_t; const lcl_vovoc_t* vovoc = static_cast<const lcl_vovoc_t*>(branch[0]); const Type& v0 = vovoc->t0(); const Type& v1 = vovoc->t1(); const Type c = vovoc->t2(); const Type& v2 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(vovoc->f0()); const details::operator_type o1 = expr_gen.get_operator(vovoc->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vovoc->f0(); binary_functor_t f1 = vovoc->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template
compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, v1, c, v2, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 (v1 o1 c)) o2 v1)\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, v1, c, v2, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vocovov_expression4 { typedef typename vocovov_t::type4 node_type; typedef typename vocovov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 (c o1 v1)) o2 v2) typedef typename synthesize_vocov_expression1::node_type lcl_vocov_t; const lcl_vocov_t* vocov = static_cast<const lcl_vocov_t*>(branch[0]); const Type& v0 = vocov->t0(); const Type c = vocov->t1(); const Type& v1 = vocov->t2(); const Type& v2 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(vocov->f0()); const details::operator_type o1 = expr_gen.get_operator(vocov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vocov->f0(); binary_functor_t f1 = vocov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, c, v1, v2, result); if (synthesis_result) return result;
// NOTE(review): remaining "expression4" variants, plus
// synthesize_vococov_expression4, whose ((v0 o0 (c0 o1 c1)) o2 v1) shape is
// marked "Not possible" and always yields error_node() (the constant
// subexpression is presumably folded before reaching here -- confirm).
// Then synthesize_uvouv_expression fuses (unary v0) op (unary v1) and
// strength-reduces the double-negation cases for +, -, *, / before falling
// back to a generic uvouv_node; it frees both branches in all paths.
node_type::allocate(*(expr_gen.node_allocator_), c, v0, v1, v2, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_covocov_expression4 { typedef typename covocov_t::type4 node_type; typedef typename covocov_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((c0 o0 (v0 o1 c1)) o2 v1) typedef typename synthesize_covoc_expression1::node_type lcl_covoc_t; const lcl_covoc_t* covoc = static_cast<const lcl_covoc_t*>(branch[0]); const Type c0 = covoc->t0(); const Type& v0 = covoc->t1(); const Type c1 = covoc->t2(); const Type& v1 = static_cast<details::variable_node<Type>*>(branch[1])->ref(); const details::operator_type o0 = expr_gen.get_operator(covoc->f0()); const details::operator_type o1 = expr_gen.get_operator(covoc->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = covoc->f0(); binary_functor_t f1 = covoc->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, c1, v1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((c0 o0 (v0 o1 c1)) o2 v1)\n")); return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, c1, v1, f0, f1, f2); } static inline std::string
id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vocovoc_expression4 { typedef typename vocovoc_t::type4 node_type; typedef typename vocovoc_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((v0 o0 (c0 o1 v1)) o2 c1) typedef typename synthesize_vocov_expression1::node_type lcl_vocov_t; const lcl_vocov_t* vocov = static_cast<const lcl_vocov_t*>(branch[0]); const Type& v0 = vocov->t0(); const Type c0 = vocov->t1(); const Type& v1 = vocov->t2(); const Type c1 = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = expr_gen.get_operator(vocov->f0()); const details::operator_type o1 = expr_gen.get_operator(vocov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = vocov->f0(); binary_functor_t f1 = vocov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), v0, c0, v1, c1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((v0 o0 (c0 o1 v1)) o2 c1)\n")); return node_type::allocate(*(expr_gen.node_allocator_), v0, c0, v1, c1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const
details::operator_type o0, const details::operator_type o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_covovoc_expression4 { typedef typename covovoc_t::type4 node_type; typedef typename covovoc_t::sf4_type sf4_type; typedef typename node_type::T0 T0; typedef typename node_type::T1 T1; typedef typename node_type::T2 T2; typedef typename node_type::T3 T3; static inline expression_node_ptr process(expression_generator<Type>& expr_gen, const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // ((c0 o0 (v0 o1 v1)) o2 c1) typedef typename synthesize_covov_expression1::node_type lcl_covov_t; const lcl_covov_t* covov = static_cast<const lcl_covov_t*>(branch[0]); const Type c0 = covov->t0(); const Type& v0 = covov->t1(); const Type& v1 = covov->t2(); const Type c1 = static_cast<details::literal_node<Type>*>(branch[1])->value(); const details::operator_type o0 = expr_gen.get_operator(covov->f0()); const details::operator_type o1 = expr_gen.get_operator(covov->f1()); const details::operator_type o2 = operation; binary_functor_t f0 = covov->f0(); binary_functor_t f1 = covov->f1(); binary_functor_t f2 = reinterpret_cast<binary_functor_t>(0); details::free_node(*(expr_gen.node_allocator_),branch[0]); details::free_node(*(expr_gen.node_allocator_),branch[1]); expression_node_ptr result = error_node(); const bool synthesis_result = synthesize_sf4ext_expression::template compile<T0, T1, T2, T3> (expr_gen, id(expr_gen, o0, o1, o2), c0, v0, v1, c1, result); if (synthesis_result) return result; else if (!expr_gen.valid_operator(o2,f2)) return error_node(); exprtk_debug(("((c0 o0 (v0 o1 v1)) o2 c1)\n")); return node_type::allocate(*(expr_gen.node_allocator_), c0, v0, v1, c1, f0, f1, f2); } static inline std::string id(expression_generator<Type>& expr_gen, const details::operator_type o0, const details::operator_type
o1, const details::operator_type o2) { return (details::build_string() << "(t" << expr_gen.to_str(o0) << "(t" << expr_gen.to_str(o1) << "t)" << expr_gen.to_str(o2) << "t"); } }; struct synthesize_vococov_expression4 { typedef typename vococov_t::type4 node_type; static inline expression_node_ptr process(expression_generator<Type>&, const details::operator_type&, expression_node_ptr (&)[2]) { // ((v0 o0 (c0 o1 c1)) o2 v1) - Not possible exprtk_debug(("((v0 o0 (c0 o1 c1)) o2 v1) - Not possible\n")); return error_node(); } static inline std::string id(expression_generator<Type>&, const details::operator_type, const details::operator_type, const details::operator_type) { return "INVALID"; } }; #endif inline expression_node_ptr synthesize_uvouv_expression(const details::operator_type& operation, expression_node_ptr (&branch)[2]) { // Definition: uv o uv details::operator_type o0 = static_cast<details::uv_base_node<Type>*>(branch[0])->operation(); details::operator_type o1 = static_cast<details::uv_base_node<Type>*>(branch[1])->operation(); const Type& v0 = static_cast<details::uv_base_node<Type>*>(branch[0])->v(); const Type& v1 = static_cast<details::uv_base_node<Type>*>(branch[1])->v(); unary_functor_t u0 = reinterpret_cast<unary_functor_t> (0); unary_functor_t u1 = reinterpret_cast<unary_functor_t> (0); binary_functor_t f = reinterpret_cast<binary_functor_t>(0); if (!valid_operator(o0,u0)) return error_node(); else if (!valid_operator(o1,u1)) return error_node(); else if (!valid_operator(operation,f)) return error_node(); expression_node_ptr result = error_node(); if ( (details::e_neg == o0) && (details::e_neg == o1) ) { switch (operation) { // (-v0 + -v1) --> -(v0 + v1) case details::e_add : result = (*this)(details::e_neg, node_allocator_-> allocate_rr<typename details:: vov_node<Type,details::add_op<Type> > >(v0, v1)); exprtk_debug(("(-v0 + -v1) --> -(v0 + v1)\n")); break; // (-v0 - -v1) --> (v1 - v0) case details::e_sub : result = node_allocator_->
// NOTE(review): string expression synthesis. string_opr_switch_statements
// expands one case per supported string operator (<, <=, >, >=, ==, !=, in,
// like, ilike) inside each synthesize_str_* helper; the helpers differ only
// in which operands carry an active range object (rp0/rp1). The synthesize_s*
// wrappers below unpack the concrete node kinds (stringvar, string_range,
// const_string_range, string literal), clear the donated range_ref before
// freeing the source node, and delegate to the matching *_impl template.
allocate_rr<typename details:: vov_node<Type,details::sub_op<Type> > >(v1, v0); exprtk_debug(("(-v0 - -v1) --> (v1 - v0)\n")); break; // (-v0 * -v1) --> (v0 * v1) case details::e_mul : result = node_allocator_-> allocate_rr<typename details:: vov_node<Type,details::mul_op<Type> > >(v0, v1); exprtk_debug(("(-v0 * -v1) --> (v0 * v1)\n")); break; // (-v0 / -v1) --> (v0 / v1) case details::e_div : result = node_allocator_-> allocate_rr<typename details:: vov_node<Type,details::div_op<Type> > >(v0, v1); exprtk_debug(("(-v0 / -v1) --> (v0 / v1)\n")); break; default : break; } } if (0 == result) { result = node_allocator_-> allocate_rrrrr<typename details::uvouv_node<Type> >(v0, v1, u0, u1, f); } details::free_all_nodes(*node_allocator_,branch); return result; } #undef basic_opr_switch_statements #undef extended_opr_switch_statements #undef unary_opr_switch_statements #ifndef exprtk_disable_string_capabilities #define string_opr_switch_statements \ case_stmt(details:: e_lt ,details:: lt_op) \ case_stmt(details:: e_lte ,details:: lte_op) \ case_stmt(details:: e_gt ,details:: gt_op) \ case_stmt(details:: e_gte ,details:: gte_op) \ case_stmt(details:: e_eq ,details:: eq_op) \ case_stmt(details:: e_ne ,details:: ne_op) \ case_stmt(details::e_in ,details:: in_op) \ case_stmt(details::e_like ,details:: like_op) \ case_stmt(details::e_ilike,details::ilike_op) \ template <typename T0, typename T1> inline expression_node_ptr synthesize_str_xrox_expression_impl(const details::operator_type& opr, T0 s0, T1 s1, range_t rp0) { switch (opr) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate_ttt<typename details::str_xrox_node<Type,T0,T1,range_t,op1<Type> >,T0,T1> \ (s0, s1, rp0); \ string_opr_switch_statements #undef case_stmt default : return error_node(); } } template <typename T0, typename T1> inline expression_node_ptr synthesize_str_xoxr_expression_impl(const details::operator_type& opr, T0 s0, T1 s1, range_t rp1) { switch (opr) { #define
case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate_ttt<typename details::str_xoxr_node<Type,T0,T1,range_t,op1<Type> >,T0,T1> \ (s0, s1, rp1); \ string_opr_switch_statements #undef case_stmt default : return error_node(); } } template <typename T0, typename T1> inline expression_node_ptr synthesize_str_xroxr_expression_impl(const details::operator_type& opr, T0 s0, T1 s1, range_t rp0, range_t rp1) { switch (opr) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate_tttt<typename details::str_xroxr_node<Type,T0,T1,range_t,op1<Type> >,T0,T1> \ (s0, s1, rp0, rp1); \ string_opr_switch_statements #undef case_stmt default : return error_node(); } } template <typename T0, typename T1> inline expression_node_ptr synthesize_sos_expression_impl(const details::operator_type& opr, T0 s0, T1 s1) { switch (opr) { #define case_stmt(op0,op1) \ case op0 : return node_allocator_-> \ allocate_tt<typename details::sos_node<Type,T0,T1,op1<Type> >,T0,T1>(s0, s1); \ string_opr_switch_statements #undef case_stmt default : return error_node(); } } inline expression_node_ptr synthesize_sos_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string& s0 = static_cast<details::stringvar_node<Type>*>(branch[0])->ref(); std::string& s1 = static_cast<details::stringvar_node<Type>*>(branch[1])->ref(); return synthesize_sos_expression_impl<std::string&,std::string&>(opr, s0, s1); } inline expression_node_ptr synthesize_sros_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string& s0 = static_cast<details::string_range_node<Type>*>(branch[0])->ref (); std::string& s1 = static_cast<details::stringvar_node<Type>*> (branch[1])->ref (); range_t rp0 = static_cast<details::string_range_node<Type>*>(branch[0])->range(); static_cast<details::string_range_node<Type>*>(branch[0])->range_ref().clear(); free_node(*node_allocator_,branch[0]); return
synthesize_str_xrox_expression_impl<std::string&,std::string&>(opr, s0, s1, rp0); } inline expression_node_ptr synthesize_sosr_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string& s0 = static_cast<details::stringvar_node<Type>*> (branch[0])->ref (); std::string& s1 = static_cast<details::string_range_node<Type>*>(branch[1])->ref (); range_t rp1 = static_cast<details::string_range_node<Type>*>(branch[1])->range(); static_cast<details::string_range_node<Type>*>(branch[1])->range_ref().clear(); free_node(*node_allocator_,branch[1]); return synthesize_str_xoxr_expression_impl<std::string&,std::string&>(opr, s0, s1, rp1); } inline expression_node_ptr synthesize_socsr_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string& s0 = static_cast<details::stringvar_node<Type>*> (branch[0])->ref (); std::string s1 = static_cast<details::const_string_range_node<Type>*>(branch[1])->str (); range_t rp1 = static_cast<details::const_string_range_node<Type>*>(branch[1])->range(); static_cast<details::const_string_range_node<Type>*>(branch[1])->range_ref().clear(); free_node(*node_allocator_,branch[1]); return synthesize_str_xoxr_expression_impl<std::string&, const std::string>(opr, s0, s1, rp1); } inline expression_node_ptr synthesize_srosr_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string& s0 = static_cast<details::string_range_node<Type>*>(branch[0])->ref (); std::string& s1 = static_cast<details::string_range_node<Type>*>(branch[1])->ref (); range_t rp0 = static_cast<details::string_range_node<Type>*>(branch[0])->range(); range_t rp1 = static_cast<details::string_range_node<Type>*>(branch[1])->range(); static_cast<details::string_range_node<Type>*>(branch[0])->range_ref().clear(); static_cast<details::string_range_node<Type>*>(branch[1])->range_ref().clear(); details::free_node(*node_allocator_,branch[0]); details::free_node(*node_allocator_,branch[1]);
return synthesize_str_xroxr_expression_impl<std::string&,std::string&>(opr, s0, s1, rp0, rp1); } inline expression_node_ptr synthesize_socs_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string& s0 = static_cast< details::stringvar_node<Type>*>(branch[0])->ref(); std::string s1 = static_cast<details::string_literal_node<Type>*>(branch[1])->str(); details::free_node(*node_allocator_,branch[1]); return synthesize_sos_expression_impl<std::string&, const std::string>(opr, s0, s1); } inline expression_node_ptr synthesize_csos_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string s0 = static_cast<details::string_literal_node<Type>*>(branch[0])->str(); std::string& s1 = static_cast< details::stringvar_node<Type>*>(branch[1])->ref(); details::free_node(*node_allocator_,branch[0]); return synthesize_sos_expression_impl<const std::string,std::string&>(opr, s0, s1); } inline expression_node_ptr synthesize_csosr_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string s0 = static_cast<details::string_literal_node<Type>*>(branch[0])->str (); std::string& s1 = static_cast<details::string_range_node<Type>*> (branch[1])->ref (); range_t rp1 = static_cast<details::string_range_node<Type>*> (branch[1])->range(); static_cast<details::string_range_node<Type>*>(branch[1])->range_ref().clear(); details::free_node(*node_allocator_,branch[0]); details::free_node(*node_allocator_,branch[1]); return synthesize_str_xoxr_expression_impl<const std::string,std::string&>(opr, s0, s1, rp1); } inline expression_node_ptr synthesize_srocs_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string& s0 = static_cast<details::string_range_node<Type>*> (branch[0])->ref (); std::string s1 = static_cast<details::string_literal_node<Type>*>(branch[1])->str (); range_t rp0 = static_cast<details::string_range_node<Type>*> (branch[0])->range();
// NOTE(review): constant/range string combinations. csocs folds two string
// literals at compile time: '+' concatenates into a new string literal node;
// in/like/ilike evaluate immediately into a numeric literal; any other
// operator is evaluated once through a temporary sos node and the resulting
// value is materialised as a literal_node. All helpers clear donated
// range_refs before freeing their source nodes.
static_cast<details::string_range_node<Type>*>(branch[0])->range_ref().clear(); details::free_node(*node_allocator_,branch[0]); details::free_node(*node_allocator_,branch[1]); return synthesize_str_xrox_expression_impl<std::string&, const std::string>(opr, s0, s1, rp0); } inline expression_node_ptr synthesize_srocsr_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string& s0 = static_cast<details::string_range_node<Type>*> (branch[0])->ref (); std::string s1 = static_cast<details::const_string_range_node<Type>*>(branch[1])->str (); range_t rp0 = static_cast<details::string_range_node<Type>*> (branch[0])->range(); range_t rp1 = static_cast<details::const_string_range_node<Type>*>(branch[1])->range(); static_cast<details::string_range_node<Type>*> (branch[0])->range_ref().clear(); static_cast<details::const_string_range_node<Type>*>(branch[1])->range_ref().clear(); details::free_node(*node_allocator_,branch[0]); details::free_node(*node_allocator_,branch[1]); return synthesize_str_xroxr_expression_impl<std::string&, const std::string>(opr, s0, s1, rp0, rp1); } inline expression_node_ptr synthesize_csocs_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { const std::string s0 = static_cast<details::string_literal_node<Type>*>(branch[0])->str(); const std::string s1 = static_cast<details::string_literal_node<Type>*>(branch[1])->str(); expression_node_ptr result = error_node(); if (details::e_add == opr) result = node_allocator_->allocate_c<details::string_literal_node<Type> >(s0 + s1); else if (details::e_in == opr) result = node_allocator_->allocate_c<details::literal_node<Type> >(details::in_op <Type>::process(s0,s1)); else if (details::e_like == opr) result = node_allocator_->allocate_c<details::literal_node<Type> >(details::like_op <Type>::process(s0,s1)); else if (details::e_ilike == opr) result = node_allocator_->allocate_c<details::literal_node<Type> >(details::ilike_op<Type>::process(s0,s1));
else { expression_node_ptr temp = synthesize_sos_expression_impl<const std::string, const std::string>(opr, s0, s1); const Type v = temp->value(); details::free_node(*node_allocator_,temp); result = node_allocator_->allocate<literal_node_t>(v); } details::free_all_nodes(*node_allocator_,branch); return result; } inline expression_node_ptr synthesize_csocsr_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { const std::string s0 = static_cast<details::string_literal_node<Type>*> (branch[0])->str (); std::string s1 = static_cast<details::const_string_range_node<Type>*>(branch[1])->str (); range_t rp1 = static_cast<details::const_string_range_node<Type>*>(branch[1])->range(); static_cast<details::const_string_range_node<Type>*>(branch[1])->range_ref().clear(); free_node(*node_allocator_,branch[0]); free_node(*node_allocator_,branch[1]); return synthesize_str_xoxr_expression_impl<const std::string, const std::string>(opr, s0, s1, rp1); } inline expression_node_ptr synthesize_csros_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string s0 = static_cast<details::const_string_range_node<Type>*>(branch[0])->str (); std::string& s1 = static_cast<details::stringvar_node<Type>*> (branch[1])->ref (); range_t rp0 = static_cast<details::const_string_range_node<Type>*>(branch[0])->range(); static_cast<details::const_string_range_node<Type>*>(branch[0])->range_ref().clear(); free_node(*node_allocator_,branch[0]); return synthesize_str_xrox_expression_impl<const std::string,std::string&>(opr, s0, s1, rp0); } inline expression_node_ptr synthesize_csrosr_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { const std::string s0 = static_cast<details::const_string_range_node<Type>*>(branch[0])->str (); std::string& s1 = static_cast<details::string_range_node<Type>*> (branch[1])->ref (); range_t rp0 = static_cast<details::const_string_range_node<Type>*>(branch[0])->range(); range_t rp1 =
static_cast<details::string_range_node<Type>*> (branch[1])->range(); static_cast<details::const_string_range_node<Type>*>(branch[0])->range_ref().clear(); static_cast<details::string_range_node<Type>*> (branch[1])->range_ref().clear(); free_node(*node_allocator_,branch[0]); free_node(*node_allocator_,branch[1]); return synthesize_str_xroxr_expression_impl<const std::string,std::string&>(opr, s0, s1, rp0, rp1); } inline expression_node_ptr synthesize_csrocs_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string s0 = static_cast<details::const_string_range_node<Type>*>(branch[0])->str (); const std::string s1 = static_cast<details::string_literal_node<Type>*> (branch[1])->str (); range_t rp0 = static_cast<details::const_string_range_node<Type>*>(branch[0])->range(); static_cast<details::const_string_range_node<Type>*>(branch[0])->range_ref().clear(); details::free_all_nodes(*node_allocator_,branch); return synthesize_str_xrox_expression_impl<const std::string,std::string>(opr, s0, s1, rp0); } inline expression_node_ptr synthesize_csrocsr_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { std::string s0 = static_cast<details::const_string_range_node<Type>*>(branch[0])->str (); std::string s1 = static_cast<details::const_string_range_node<Type>*>(branch[1])->str (); range_t rp0 = static_cast<details::const_string_range_node<Type>*>(branch[0])->range(); range_t rp1 = static_cast<details::const_string_range_node<Type>*>(branch[1])->range(); static_cast<details::const_string_range_node<Type>*>(branch[0])->range_ref().clear(); static_cast<details::const_string_range_node<Type>*>(branch[1])->range_ref().clear(); details::free_all_nodes(*node_allocator_,branch); return synthesize_str_xroxr_expression_impl<const std::string, const std::string>(opr, s0, s1, rp0, rp1); } inline expression_node_ptr synthesize_strogen_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2]) { switch (opr)
{
   #define case_stmt(op0,op1) \
   case op0 : return node_allocator_-> \
   allocate_ttt<typename details::str_sogens_node<Type,op1<Type> > > \
   (opr, branch[0], branch[1]); \

   string_opr_switch_statements
   #undef case_stmt
   default : return error_node();
   }
}
#endif

#ifndef exprtk_disable_string_capabilities
// Top-level dispatcher for binary string operations. Classifies each branch as
// string variable (s), const string (cs), string range (sr), const string range
// (csr) or 'generic' (gen), then routes to the matching specialisation above.
inline expression_node_ptr synthesize_string_expression(const details::operator_type& opr, expression_node_ptr (&branch)[2])
{
   if ((0 == branch[0]) || (0 == branch[1]))
   {
      details::free_all_nodes(*node_allocator_,branch);
      return error_node();
   }

   const bool b0_is_s   = details::is_string_node             (branch[0]);
   const bool b0_is_cs  = details::is_const_string_node       (branch[0]);
   const bool b0_is_sr  = details::is_string_range_node       (branch[0]);
   const bool b0_is_csr = details::is_const_string_range_node(branch[0]);

   const bool b1_is_s   = details::is_string_node             (branch[1]);
   const bool b1_is_cs  = details::is_const_string_node       (branch[1]);
   const bool b1_is_sr  = details::is_string_range_node       (branch[1]);
   const bool b1_is_csr = details::is_const_string_range_node(branch[1]);

   const bool b0_is_gen = details::is_string_assignment_node (branch[0]) ||
                          details::is_genricstring_range_node(branch[0]) ||
                          details::is_string_concat_node     (branch[0]) ||
                          details::is_string_function_node   (branch[0]) ||
                          details::is_string_condition_node  (branch[0]) ||
                          details::is_string_ccondition_node (branch[0]) ||
                          details::is_string_vararg_node     (branch[0]) ;

   const bool b1_is_gen = details::is_string_assignment_node (branch[1]) ||
                          details::is_genricstring_range_node(branch[1]) ||
                          details::is_string_concat_node     (branch[1]) ||
                          details::is_string_function_node   (branch[1]) ||
                          details::is_string_condition_node  (branch[1]) ||
                          details::is_string_ccondition_node (branch[1]) ||
                          details::is_string_vararg_node     (branch[1]) ;

   // Concatenation with at least one non-literal operand goes straight to the
   // generic concat node (literal+literal is folded by the csocs path below).
   if (details::e_add == opr)
   {
      if (!b0_is_cs || !b1_is_cs)
      {
         return synthesize_expression<string_concat_node_t,2>(opr,branch);
      }
   }

   if (b0_is_gen || b1_is_gen)
   {
      return synthesize_strogen_expression(opr,branch);
   }
   else
   if (b0_is_s)
   {
      if (b1_is_s ) return synthesize_sos_expression (opr,branch);
      else if (b1_is_cs ) return synthesize_socs_expression (opr,branch);
      else if (b1_is_sr ) return synthesize_sosr_expression (opr,branch);
      else if (b1_is_csr) return synthesize_socsr_expression (opr,branch);
   }
   else if (b0_is_cs)
   {
      if (b1_is_s ) return synthesize_csos_expression (opr,branch);
      else if (b1_is_cs ) return synthesize_csocs_expression (opr,branch);
      else if (b1_is_sr ) return synthesize_csosr_expression (opr,branch);
      else if (b1_is_csr) return synthesize_csocsr_expression(opr,branch);
   }
   else if (b0_is_sr)
   {
      if (b1_is_s ) return synthesize_sros_expression (opr,branch);
      else if (b1_is_sr ) return synthesize_srosr_expression (opr,branch);
      else if (b1_is_cs ) return synthesize_srocs_expression (opr,branch);
      else if (b1_is_csr) return synthesize_srocsr_expression(opr,branch);
   }
   else if (b0_is_csr)
   {
      if (b1_is_s ) return synthesize_csros_expression (opr,branch);
      else if (b1_is_sr ) return synthesize_csrosr_expression (opr,branch);
      else if (b1_is_cs ) return synthesize_csrocs_expression (opr,branch);
      else if (b1_is_csr) return synthesize_csrocsr_expression(opr,branch);
   }

   return error_node();
}
#else
// String support disabled: release branches and fail.
inline expression_node_ptr synthesize_string_expression(const details::operator_type&, expression_node_ptr (&branch)[2])
{
   details::free_all_nodes(*node_allocator_,branch);
   return error_node();
}
#endif

#ifndef exprtk_disable_string_capabilities
// Ternary string synthesis: only 'inrange' (s1 in [s0,s2]) is supported.
// Constant operands are folded; mixed variable/literal combinations map onto
// the appropriate sosos_node instantiation.
inline expression_node_ptr synthesize_string_expression(const details::operator_type& opr, expression_node_ptr (&branch)[3])
{
   if (details::e_inrange != opr)
      return error_node();
   else if ((0 == branch[0]) || (0 == branch[1]) || (0 == branch[2]))
   {
      details::free_all_nodes(*node_allocator_,branch);
      return error_node();
   }
   else if ( details::is_const_string_node(branch[0]) &&
             details::is_const_string_node(branch[1]) &&
             details::is_const_string_node(branch[2]) )
   {
      // All three literal: fold to a numeric constant.
      const std::string s0 = static_cast<details::string_literal_node<Type>*>(branch[0])->str();
      const
std::string s1 = static_cast<details::string_literal_node<Type>*>(branch[1])->str();
      const std::string s2 = static_cast<details::string_literal_node<Type>*>(branch[2])->str();
      const Type v = (((s0 <= s1) && (s1 <= s2)) ? Type(1) : Type(0));
      details::free_all_nodes(*node_allocator_,branch);
      return node_allocator_->allocate_c<details::literal_node<Type> >(v);
   }
   else if ( details::is_string_node(branch[0]) &&
             details::is_string_node(branch[1]) &&
             details::is_string_node(branch[2]) )
   {
      // All three variables: bind every operand by reference.
      std::string& s0 = static_cast<details::stringvar_node<Type>*>(branch[0])->ref();
      std::string& s1 = static_cast<details::stringvar_node<Type>*>(branch[1])->ref();
      std::string& s2 = static_cast<details::stringvar_node<Type>*>(branch[2])->ref();
      typedef typename details::sosos_node<Type,std::string&,std::string&,std::string&,details::inrange_op<Type> > inrange_t;
      return node_allocator_->allocate_type<inrange_t,std::string&,std::string&,std::string&>(s0,s1,s2);
   }
   else if ( details::is_const_string_node(branch[0]) &&
             details::is_string_node(branch[1]) &&
             details::is_const_string_node(branch[2]) )
   {
      // literal / variable / literal: copy the literals, free their nodes.
      std::string s0 = static_cast<details::string_literal_node<Type>*>(branch[0])->str();
      std::string& s1 = static_cast< details::stringvar_node<Type>*>(branch[1])->ref();
      std::string s2 = static_cast<details::string_literal_node<Type>*>(branch[2])->str();
      typedef typename details::sosos_node<Type,std::string,std::string&,std::string,details::inrange_op<Type> > inrange_t;
      details::free_node(*node_allocator_,branch[0]);
      details::free_node(*node_allocator_,branch[2]);
      return node_allocator_->allocate_type<inrange_t,std::string,std::string&,std::string>(s0,s1,s2);
   }
   else if ( details::is_string_node(branch[0]) &&
             details::is_const_string_node(branch[1]) &&
             details::is_string_node(branch[2]) )
   {
      // variable / literal / variable.
      std::string& s0 = static_cast< details::stringvar_node<Type>*>(branch[0])->ref();
      std::string s1 = static_cast<details::string_literal_node<Type>*>(branch[1])->str();
      std::string& s2 = static_cast< details::stringvar_node<Type>*>(branch[2])->ref();
      typedef typename details::sosos_node<Type,std::string&,std::string,std::string&,details::inrange_op<Type> > inrange_t;
      details::free_node(*node_allocator_,branch[1]);
      return node_allocator_->allocate_type<inrange_t,std::string&,std::string,std::string&>(s0,s1,s2);
   }
   else if ( details::is_string_node(branch[0]) &&
             details::is_string_node(branch[1]) &&
             details::is_const_string_node(branch[2]) )
   {
      // variable / variable / literal.
      std::string& s0 = static_cast< details::stringvar_node<Type>*>(branch[0])->ref();
      std::string& s1 = static_cast< details::stringvar_node<Type>*>(branch[1])->ref();
      std::string s2 = static_cast<details::string_literal_node<Type>*>(branch[2])->str();
      typedef typename details::sosos_node<Type,std::string&,std::string&,std::string,details::inrange_op<Type> > inrange_t;
      details::free_node(*node_allocator_,branch[2]);
      return node_allocator_->allocate_type<inrange_t,std::string&,std::string&,std::string>(s0,s1,s2);
   }
   else if ( details::is_const_string_node(branch[0]) &&
             details:: is_string_node(branch[1]) &&
             details:: is_string_node(branch[2]) )
   {
      // literal / variable / variable.
      std::string s0 = static_cast<details::string_literal_node<Type>*>(branch[0])->str();
      std::string& s1 = static_cast< details::stringvar_node<Type>*>(branch[1])->ref();
      std::string& s2 = static_cast< details::stringvar_node<Type>*>(branch[2])->ref();
      typedef typename details::sosos_node<Type,std::string,std::string&,std::string&,details::inrange_op<Type> > inrange_t;
      details::free_node(*node_allocator_,branch[0]);
      return node_allocator_->allocate_type<inrange_t,std::string,std::string&,std::string&>(s0,s1,s2);
   }
   else
      return error_node();
}
#else
// String support disabled: release branches and fail.
inline expression_node_ptr synthesize_string_expression(const details::operator_type&, expression_node_ptr (&branch)[3])
{
   details::free_all_nodes(*node_allocator_,branch);
   return error_node();
}
#endif

// Synthesizes operations in which at least one operand is the 'null' value,
// applying the promotion rules documented in the comment below.
inline expression_node_ptr synthesize_null_expression(const details::operator_type& operation, expression_node_ptr (&branch)[2])
{
   /* Note: The
following are the type promotion rules
      that relate to operations that include 'null':
      0. null ==/!=     null --> true false
      1. null operation null --> null
      2. x    ==/!=     null --> true/false
      3. null ==/!=     x    --> true/false
      4. x    operation null --> x
      5. null operation x    --> x
   */
   typedef typename details::null_eq_node<T> nulleq_node_t;

   bool b0_null = details::is_null_node(branch[0]);
   bool b1_null = details::is_null_node(branch[1]);

   if (b0_null && b1_null)
   {
      // Rule 0: null ==/!= null folds to a constant; any other op yields null
      // (branch[0] is itself a null node, so it is simply returned).
      expression_node_ptr result = error_node();
      if (details::e_eq == operation)
         result = node_allocator_->allocate_c<literal_node_t>(T(1));
      else if (details::e_ne == operation)
         result = node_allocator_->allocate_c<literal_node_t>(T(0));
      if (result)
      {
         details::free_node(*node_allocator_,branch[0]);
         details::free_node(*node_allocator_,branch[1]);
         return result;
      }
      details::free_node(*node_allocator_,branch[1]);
      return branch[0];
   }
   else if (details::e_eq == operation)
   {
      // Rules 2/3: compare the non-null branch against null.
      expression_node_ptr result = node_allocator_->
         allocate_rc<nulleq_node_t>(branch[b0_null ? 0 : 1],true);
      details::free_node(*node_allocator_,branch[b0_null ? 1 : 0]);
      return result;
   }
   else if (details::e_ne == operation)
   {
      expression_node_ptr result = node_allocator_->
         allocate_rc<nulleq_node_t>(branch[b0_null ?
 1 : 0]);
      return result;
   }
   else if (b0_null)
   {
      // Normalise so the surviving (non-null) operand sits in branch[0].
      details::free_node(*node_allocator_,branch[0]);
      branch[0] = branch[1];
      branch[1] = error_node();
   }
   else if (b1_null)
   {
      details::free_node(*node_allocator_,branch[1]);
      branch[1] = error_node();
   }

   // Rules 4/5: arithmetic with null passes the other operand through.
   if (
        (details::e_add == operation) || (details::e_sub == operation) ||
        (details::e_mul == operation) || (details::e_div == operation) ||
        (details::e_mod == operation) || (details::e_pow == operation)
      )
   {
      return branch[0];
   }
   // Logical/relational operators involving null evaluate to false.
   else if (
             (details::e_lt    == operation) || (details::e_lte  == operation) ||
             (details::e_gt    == operation) || (details::e_gte  == operation) ||
             (details::e_and   == operation) || (details::e_nand == operation) ||
             (details::e_or    == operation) || (details::e_nor  == operation) ||
             (details::e_xor   == operation) || (details::e_xnor == operation) ||
             (details::e_in    == operation) || (details::e_like == operation) ||
             (details::e_ilike == operation)
           )
   {
      return node_allocator_->allocate_c<literal_node_t>(T(0));
   }

   details::free_node(*node_allocator_,branch[0]);

   return node_allocator_->allocate<details::null_node<Type> >();
}

// Generic N-ary synthesis: allocates a NodeType over the branches and constant
// folds when every branch is a foldable constant.
template <typename NodeType, std::size_t N>
inline expression_node_ptr synthesize_expression(const details::operator_type& operation, expression_node_ptr (&branch)[N])
{
   if (
        (details::e_in    == operation) ||
        (details::e_like  == operation) ||
        (details::e_ilike == operation)
      )
   {
      free_all_nodes(*node_allocator_,branch);
      return error_node();
   }
   else if (!details::all_nodes_valid<N>(branch))
   {
      free_all_nodes(*node_allocator_,branch);
      return error_node();
   }
   else if ((details::e_default != operation))
   {
      // Attempt simple constant folding optimisation.
      expression_node_ptr expression_point = node_allocator_->allocate<NodeType>(operation,branch);
      if (is_constant_foldable<N>(branch))
      {
         // Evaluate once, discard the tree, keep only the literal value.
         Type v = expression_point->value();
         details::free_node(*node_allocator_,expression_point);
         return node_allocator_->allocate<literal_node_t>(v);
      }
      else
         return expression_point;
   }
   else
      return error_node();
}

// Synthesis for a user function call node: binds the branches to the allocated
// function node and constant folds only when the function has no side effects.
template <typename NodeType, std::size_t N>
inline expression_node_ptr synthesize_expression(F* f, expression_node_ptr (&branch)[N])
{
   if (!details::all_nodes_valid<N>(branch))
   {
      free_all_nodes(*node_allocator_,branch);
      return error_node();
   }

   typedef typename details::function_N_node<T,ifunction_t,N> function_N_node_t;

   // Attempt simple constant folding optimisation.
   expression_node_ptr expression_point = node_allocator_->allocate<NodeType>(f);
   function_N_node_t* func_node_ptr = dynamic_cast<function_N_node_t*>(expression_point);

   if (0 == func_node_ptr)
   {
      free_all_nodes(*node_allocator_,branch);
      return error_node();
   }
   else
      func_node_ptr->init_branches(branch);

   if (is_constant_foldable<N>(branch) && !f->has_side_effects())
   {
      Type v = expression_point->value();
      details::free_node(*node_allocator_,expression_point);
      return node_allocator_->allocate<literal_node_t>(v);
   }

   parser_->state_.activate_side_effect("synthesize_expression(function<NT,N>)");

   return expression_point;
}

bool strength_reduction_enabled_;            // enables algebraic strength reduction rewrites
details::node_allocator* node_allocator_;    // owner of all synthesized expression nodes
synthesize_map_t synthesize_map_;
unary_op_map_t* unary_op_map_;
binary_op_map_t* binary_op_map_;
inv_binary_op_map_t* inv_binary_op_map_;
sf3_map_t* sf3_map_;
sf4_map_t* sf4_map_;
parser_t* parser_;
};

// Records a parse error for later reporting.
inline void set_error(const parser_error::type& error_type)
{
   error_list_.push_back(error_type);
}

// Discards the most recently recorded error, if any.
inline void remove_last_error()
{
   if (!error_list_.empty())
   {
      error_list_.pop_back();
   }
}

// Stores the first synthesis error message; later messages are ignored.
inline void set_synthesis_error(const std::string& synthesis_error_message)
{
   if (synthesis_error_.empty())
   {
      synthesis_error_ = synthesis_error_message;
   }
}

// Transfers ownership of all locally-scoped variables/vectors/strings gathered
// by the scope element manager to the compiled expression, then resets each
// scope element for reuse.
inline void register_local_vars(expression<T>&
e)
{
   for (std::size_t i = 0; i < sem_.size(); ++i)
   {
      scope_element& se = sem_.get_element(i);

      if (
           (scope_element::e_variable == se.type) ||
           (scope_element::e_vecelem  == se.type)
         )
      {
         if (se.var_node)
         {
            e.register_local_var(se.var_node);
         }
         if (se.data)
         {
            e.register_local_data(se.data, 1, 0);
         }
      }
      else if (scope_element::e_vector == se.type)
      {
         if (se.vec_node)
         {
            e.register_local_var(se.vec_node);
         }
         if (se.data)
         {
            e.register_local_data(se.data, se.size, 1);
         }
      }
      #ifndef exprtk_disable_string_capabilities
      else if (scope_element::e_string == se.type)
      {
         if (se.str_node)
         {
            e.register_local_var(se.str_node);
         }
         if (se.data)
         {
            e.register_local_data(se.data, se.size, 2);
         }
      }
      #endif

      // Ownership has moved to the expression; clear the scope element.
      se.var_node = 0;
      se.vec_node = 0;
      #ifndef exprtk_disable_string_capabilities
      se.str_node = 0;
      #endif
      se.data = 0;
      se.ref_count = 0;
      se.active = false;
   }
}

// Hands the results context over to the expression (ownership transfer).
inline void register_return_results(expression<T>& e)
{
   e.register_return_results(results_context_);
   results_context_ = 0;
}

// Populates the operator -> unary functor dispatch table.
inline void load_unary_operations_map(unary_op_map_t& m)
{
   #define register_unary_op(Op,UnaryFunctor) \
   m.insert(std::make_pair(Op,UnaryFunctor<T>::process)); \

   register_unary_op(details:: e_abs, details:: abs_op)
   register_unary_op(details:: e_acos, details:: acos_op)
   register_unary_op(details::e_acosh, details::acosh_op)
   register_unary_op(details:: e_asin, details:: asin_op)
   register_unary_op(details::e_asinh, details::asinh_op)
   register_unary_op(details::e_atanh, details::atanh_op)
   register_unary_op(details:: e_ceil, details:: ceil_op)
   register_unary_op(details:: e_cos, details:: cos_op)
   register_unary_op(details:: e_cosh, details:: cosh_op)
   register_unary_op(details:: e_exp, details:: exp_op)
   register_unary_op(details::e_expm1, details::expm1_op)
   register_unary_op(details::e_floor, details::floor_op)
   register_unary_op(details:: e_log, details:: log_op)
   register_unary_op(details::e_log10, details::log10_op)
   register_unary_op(details:: e_log2, details:: log2_op)
   register_unary_op(details::e_log1p, details::log1p_op)
   register_unary_op(details:: e_neg, details:: neg_op)
   register_unary_op(details:: e_pos, details:: pos_op)
   register_unary_op(details::e_round, details::round_op)
   register_unary_op(details:: e_sin, details:: sin_op)
   register_unary_op(details:: e_sinc, details:: sinc_op)
   register_unary_op(details:: e_sinh, details:: sinh_op)
   register_unary_op(details:: e_sqrt, details:: sqrt_op)
   register_unary_op(details:: e_tan, details:: tan_op)
   register_unary_op(details:: e_tanh, details:: tanh_op)
   register_unary_op(details:: e_cot, details:: cot_op)
   register_unary_op(details:: e_sec, details:: sec_op)
   register_unary_op(details:: e_csc, details:: csc_op)
   register_unary_op(details:: e_r2d, details:: r2d_op)
   register_unary_op(details:: e_d2r, details:: d2r_op)
   register_unary_op(details:: e_d2g, details:: d2g_op)
   register_unary_op(details:: e_g2d, details:: g2d_op)
   register_unary_op(details:: e_notl, details:: notl_op)
   register_unary_op(details:: e_sgn, details:: sgn_op)
   register_unary_op(details:: e_erf, details:: erf_op)
   register_unary_op(details:: e_erfc, details:: erfc_op)
   register_unary_op(details:: e_ncdf, details:: ncdf_op)
   register_unary_op(details:: e_frac, details:: frac_op)
   register_unary_op(details::e_trunc, details::trunc_op)
   #undef register_unary_op
}

// Populates the operator -> binary functor dispatch table.
inline void load_binary_operations_map(binary_op_map_t& m)
{
   typedef typename binary_op_map_t::value_type value_type;

   #define register_binary_op(Op,BinaryFunctor) \
   m.insert(value_type(Op,BinaryFunctor<T>::process)); \

   register_binary_op(details:: e_add, details:: add_op)
   register_binary_op(details:: e_sub, details:: sub_op)
   register_binary_op(details:: e_mul, details:: mul_op)
   register_binary_op(details:: e_div, details:: div_op)
   register_binary_op(details:: e_mod, details:: mod_op)
   register_binary_op(details:: e_pow, details:: pow_op)
   register_binary_op(details:: e_lt, details:: lt_op)
   register_binary_op(details:: e_lte, details:: lte_op)
   register_binary_op(details:: e_gt, details:: gt_op)
   register_binary_op(details:: e_gte, details:: gte_op)
   register_binary_op(details:: e_eq, details:: eq_op)
   register_binary_op(details:: e_ne, details:: ne_op)
   register_binary_op(details:: e_and, details:: and_op)
   register_binary_op(details::e_nand, details::nand_op)
   register_binary_op(details:: e_or, details:: or_op)
   register_binary_op(details:: e_nor, details:: nor_op)
   register_binary_op(details:: e_xor, details:: xor_op)
   register_binary_op(details::e_xnor, details::xnor_op)
   #undef register_binary_op
}

// Populates the inverse table: binary functor -> operator.
inline void load_inv_binary_operations_map(inv_binary_op_map_t& m)
{
   typedef typename inv_binary_op_map_t::value_type value_type;

   #define register_binary_op(Op,BinaryFunctor) \
   m.insert(value_type(BinaryFunctor<T>::process,Op)); \

   register_binary_op(details:: e_add, details:: add_op)
   register_binary_op(details:: e_sub, details:: sub_op)
   register_binary_op(details:: e_mul, details:: mul_op)
   register_binary_op(details:: e_div, details:: div_op)
   register_binary_op(details:: e_mod, details:: mod_op)
   register_binary_op(details:: e_pow, details:: pow_op)
   register_binary_op(details:: e_lt, details:: lt_op)
   register_binary_op(details:: e_lte, details:: lte_op)
   register_binary_op(details:: e_gt, details:: gt_op)
   register_binary_op(details:: e_gte, details:: gte_op)
   register_binary_op(details:: e_eq, details:: eq_op)
   register_binary_op(details:: e_ne, details:: ne_op)
   register_binary_op(details:: e_and, details:: and_op)
   register_binary_op(details::e_nand, details::nand_op)
   register_binary_op(details:: e_or, details:: or_op)
   register_binary_op(details:: e_nor, details:: nor_op)
   register_binary_op(details:: e_xor, details:: xor_op)
   register_binary_op(details::e_xnor, details::xnor_op)
   #undef register_binary_op
}

// Populates the special-function (3 argument) lookup table keyed by id string.
inline void load_sf3_map(sf3_map_t& sf3_map)
{
   typedef std::pair<trinary_functor_t,details::operator_type> pair_t;

   #define register_sf3(Op) \
   sf3_map[details::sf##Op##_op<T>::id()] = pair_t(details::sf##Op##_op<T>::process,details::e_sf##Op); \

   register_sf3(00) register_sf3(01) register_sf3(02)
register_sf3(03) register_sf3(04) register_sf3(05) register_sf3(06) register_sf3(07) register_sf3(08) register_sf3(09) register_sf3(10) register_sf3(11) register_sf3(12) register_sf3(13) register_sf3(14) register_sf3(15) register_sf3(16) register_sf3(17) register_sf3(18) register_sf3(19) register_sf3(20) register_sf3(21) register_sf3(22) register_sf3(23) register_sf3(24) register_sf3(25) register_sf3(26) register_sf3(27) register_sf3(28) register_sf3(29) register_sf3(30) #undef register_sf3 #define register_sf3_extid(Id, Op) \ sf3_map[Id] = pair_t(details::sf##Op##_op<T>::process,details::e_sf##Op); \ register_sf3_extid("(t-t)-t",23) // (t-t)-t --> t-(t+t) #undef register_sf3_extid } inline void load_sf4_map(sf4_map_t& sf4_map) { typedef std::pair<quaternary_functor_t,details::operator_type> pair_t; #define register_sf4(Op) \ sf4_map[details::sf##Op##_op<T>::id()] = pair_t(details::sf##Op##_op<T>::process,details::e_sf##Op); \ register_sf4(48) register_sf4(49) register_sf4(50) register_sf4(51) register_sf4(52) register_sf4(53) register_sf4(54) register_sf4(55) register_sf4(56) register_sf4(57) register_sf4(58) register_sf4(59) register_sf4(60) register_sf4(61) register_sf4(62) register_sf4(63) register_sf4(64) register_sf4(65) register_sf4(66) register_sf4(67) register_sf4(68) register_sf4(69) register_sf4(70) register_sf4(71) register_sf4(72) register_sf4(73) register_sf4(74) register_sf4(75) register_sf4(76) register_sf4(77) register_sf4(78) register_sf4(79) register_sf4(80) register_sf4(81) register_sf4(82) register_sf4(83) #undef register_sf4 #define register_sf4ext(Op) \ sf4_map[details::sfext##Op##_op<T>::id()] = pair_t(details::sfext##Op##_op<T>::process,details::e_sf4ext##Op); \ register_sf4ext(00) register_sf4ext(01) register_sf4ext(02) register_sf4ext(03) register_sf4ext(04) register_sf4ext(05) register_sf4ext(06) register_sf4ext(07) register_sf4ext(08) register_sf4ext(09) register_sf4ext(10) register_sf4ext(11) register_sf4ext(12) register_sf4ext(13) 
register_sf4ext(14) register_sf4ext(15) register_sf4ext(16) register_sf4ext(17) register_sf4ext(18) register_sf4ext(19) register_sf4ext(20) register_sf4ext(21) register_sf4ext(22) register_sf4ext(23) register_sf4ext(24) register_sf4ext(25) register_sf4ext(26) register_sf4ext(27) register_sf4ext(28) register_sf4ext(29) register_sf4ext(30) register_sf4ext(31) register_sf4ext(32) register_sf4ext(33) register_sf4ext(34) register_sf4ext(35) register_sf4ext(36) register_sf4ext(36) register_sf4ext(38) register_sf4ext(39) register_sf4ext(40) register_sf4ext(41) register_sf4ext(42) register_sf4ext(43) register_sf4ext(44) register_sf4ext(45) register_sf4ext(46) register_sf4ext(47) register_sf4ext(48) register_sf4ext(49) register_sf4ext(50) register_sf4ext(51) register_sf4ext(52) register_sf4ext(53) register_sf4ext(54) register_sf4ext(55) register_sf4ext(56) register_sf4ext(57) register_sf4ext(58) register_sf4ext(59) register_sf4ext(60) register_sf4ext(61) #undef register_sf4ext } inline results_context_t& results_ctx() { if (0 == results_context_) { results_context_ = new results_context_t(); } return (*results_context_); } inline void return_cleanup() { #ifndef exprtk_disable_return_statement if (results_context_) { delete results_context_; results_context_ = 0; } state_.return_stmt_present = false; #endif } private: parser(const parser<T>&); parser<T>& operator=(const parser<T>&); settings_store settings_; expression_generator<T> expression_generator_; details::node_allocator node_allocator_; symtab_store symtab_store_; dependent_entity_collector dec_; std::deque<parser_error::type> error_list_; std::deque<bool> brkcnt_list_; parser_state state_; bool resolve_unknown_symbol_; results_context_t* results_context_; unknown_symbol_resolver* unknown_symbol_resolver_; unknown_symbol_resolver default_usr_; base_ops_map_t base_ops_map_; unary_op_map_t unary_op_map_; binary_op_map_t binary_op_map_; inv_binary_op_map_t inv_binary_op_map_; sf3_map_t sf3_map_; sf4_map_t sf4_map_; 
std::string synthesis_error_;
scope_element_manager sem_;

lexer::helper::helper_assembly helper_assembly_;

lexer::helper::commutative_inserter commutative_inserter_;
lexer::helper::operator_joiner operator_joiner_2_;
lexer::helper::operator_joiner operator_joiner_3_;
lexer::helper::symbol_replacer symbol_replacer_;
lexer::helper::bracket_checker bracket_checker_;
lexer::helper::numeric_checker numeric_checker_;
lexer::helper::sequence_validator sequence_validator_;

template <typename ParserType>
friend void details::disable_type_checking(ParserType& p);
};

// Compiles expr_str with an unknown-symbol resolver enabled and appends every
// variable symbol the expression references to symbol_list. Returns false if
// compilation fails. Uses T = double.
template <typename Allocator,
          template <typename, typename> class Sequence>
inline bool collect_variables(const std::string& expr_str,
                              Sequence<std::string, Allocator>& symbol_list)
{
   typedef double T;
   typedef exprtk::symbol_table<T> symbol_table_t;
   typedef exprtk::expression<T> expression_t;
   typedef exprtk::parser<T> parser_t;
   typedef parser_t::dependent_entity_collector::symbol_t symbol_t;

   symbol_table_t symbol_table;
   expression_t expression;
   parser_t parser;

   expression.register_symbol_table(symbol_table);

   parser.enable_unknown_symbol_resolver();
   parser.dec().collect_variables() = true;

   if (!parser.compile(expr_str, expression))
      return false;

   std::deque<symbol_t> symb_list;
   parser.dec().symbols(symb_list);

   for (std::size_t i = 0; i < symb_list.size(); ++i)
   {
      symbol_list.push_back(symb_list[i].first);
   }

   return true;
}

// As above, but additionally resolves symbols against an external symbol table
// and disables type checking for the analysis pass.
template <typename T,
          typename Allocator,
          template <typename, typename> class Sequence>
inline bool collect_variables(const std::string& expr_str,
                              exprtk::symbol_table<T>& extrnl_symbol_table,
                              Sequence<std::string, Allocator>& symbol_list)
{
   typedef exprtk::symbol_table<T> symbol_table_t;
   typedef exprtk::expression<T> expression_t;
   typedef exprtk::parser<T> parser_t;
   typedef typename parser_t::dependent_entity_collector::symbol_t symbol_t;

   symbol_table_t symbol_table;
   expression_t expression;
   parser_t parser;

   expression.register_symbol_table(symbol_table);
   expression.register_symbol_table(extrnl_symbol_table);

   parser.enable_unknown_symbol_resolver();
   parser.dec().collect_variables() = true;

   details::disable_type_checking(parser);

   if (!parser.compile(expr_str, expression))
      return false;

   std::deque<symbol_t> symb_list;
   parser.dec().symbols(symb_list);

   for (std::size_t i = 0; i < symb_list.size(); ++i)
   {
      symbol_list.push_back(symb_list[i].first);
   }

   return true;
}

// Compiles expr_str and appends every function symbol the expression
// references to symbol_list. Returns false if compilation fails. T = double.
template <typename Allocator,
          template <typename, typename> class Sequence>
inline bool collect_functions(const std::string& expr_str,
                              Sequence<std::string, Allocator>& symbol_list)
{
   typedef double T;
   typedef exprtk::symbol_table<T> symbol_table_t;
   typedef exprtk::expression<T> expression_t;
   typedef exprtk::parser<T> parser_t;
   typedef parser_t::dependent_entity_collector::symbol_t symbol_t;

   symbol_table_t symbol_table;
   expression_t expression;
   parser_t parser;

   expression.register_symbol_table(symbol_table);

   parser.enable_unknown_symbol_resolver();
   parser.dec().collect_functions() = true;

   if (!parser.compile(expr_str, expression))
      return false;

   std::deque<symbol_t> symb_list;
   parser.dec().symbols(symb_list);

   for (std::size_t i = 0; i < symb_list.size(); ++i)
   {
      symbol_list.push_back(symb_list[i].first);
   }

   return true;
}

// As above, but resolves against an external symbol table with type checking
// disabled for the analysis pass.
template <typename T,
          typename Allocator,
          template <typename, typename> class Sequence>
inline bool collect_functions(const std::string& expr_str,
                              exprtk::symbol_table<T>& extrnl_symbol_table,
                              Sequence<std::string, Allocator>& symbol_list)
{
   typedef exprtk::symbol_table<T> symbol_table_t;
   typedef exprtk::expression<T> expression_t;
   typedef exprtk::parser<T> parser_t;
   typedef typename parser_t::dependent_entity_collector::symbol_t symbol_t;

   symbol_table_t symbol_table;
   expression_t expression;
   parser_t parser;

   expression.register_symbol_table(symbol_table);
   expression.register_symbol_table(extrnl_symbol_table);

   parser.enable_unknown_symbol_resolver();
   parser.dec().collect_functions() = true;

   details::disable_type_checking(parser);

   if
(!parser.compile(expr_str, expression))
      return false;

   std::deque<symbol_t> symb_list;
   parser.dec().symbols(symb_list);

   for (std::size_t i = 0; i < symb_list.size(); ++i)
   {
      symbol_list.push_back(symb_list[i].first);
   }

   return true;
}

// Numerically integrates e over [r0,r1] with respect to x using the composite
// Simpson rule over 'number_of_intervals' panels. x is the bound variable that
// the expression reads; it is mutated during evaluation (and not restored).
// Returns 0 for an empty/inverted interval (r0 > r1).
template <typename T>
inline T integrate(const expression<T>& e,
                   T& x,
                   const T& r0, const T& r1,
                   const std::size_t number_of_intervals = 1000000)
{
   if (r0 > r1)
      return T(0);

   // Half the panel width: each panel spans 2h and is evaluated at its
   // left edge, midpoint and right edge.
   const T h = (r1 - r0) / (T(2) * number_of_intervals);
   T total_area = T(0);

   for (std::size_t i = 0; i < number_of_intervals; ++i)
   {
      x = r0 + T(2) * i * h;
      const T y0 = e.value(); x += h;
      const T y1 = e.value(); x += h;
      const T y2 = e.value(); x += h;
      total_area += h * (y0 + T(4) * y1 + y2) / T(3);  // Simpson's rule panel
   }

   return total_area;
}

// Named-variable overload: looks up variable_name in the expression's symbol
// table, integrates with respect to it, and restores the variable afterwards.
// Returns NaN if the symbol table is invalid or the variable is not found.
template <typename T>
inline T integrate(const expression<T>& e,
                   const std::string& variable_name,
                   const T& r0, const T& r1,
                   const std::size_t number_of_intervals = 1000000)
{
   const symbol_table<T>& sym_table = e.get_symbol_table();
   if (!sym_table.valid())
      return std::numeric_limits<T>::quiet_NaN();

   details::variable_node<T>* var = sym_table.get_variable(variable_name);
   if (var)
   {
      T& x = var->ref();
      T x_original = x;
      T result = integrate(e,x,r0,r1,number_of_intervals);
      x = x_original;
      return result;
   }
   else
      return std::numeric_limits<T>::quiet_NaN();
}

// First derivative of e with respect to x via the five-point central
// difference: (-f(x+2h) + 8f(x+h) - 8f(x-h) + f(x-2h)) / (12h).
// x is restored to its original value before returning.
template <typename T>
inline T derivative(const expression<T>& e,
                    T& x,
                    const T& h = T(0.00000001))
{
   const T x_init = x;
   const T _2h = T(2) * h;

   x = x_init + _2h;
   const T y0 = e.value();
   x = x_init + h;
   const T y1 = e.value();
   x = x_init - h;
   const T y2 = e.value();
   x = x_init - _2h;
   const T y3 = e.value();
   x = x_init;

   return (-y0 + T(8) * (y1 - y2) + y3) / (T(12) * h);
}

// Second derivative via the five-point central stencil:
// (-f(x+2h) + 16f(x+h) - 30f(x) + 16f(x-h) - f(x-2h)) / (12h^2).
template <typename T>
inline T second_derivative(const expression<T>& e,
                           T& x,
                           const T& h = T(0.00001))
{
   const T x_init = x;
   const T _2h = T(2) * h;

   const T y = e.value();
   x = x_init + _2h;
   const T y0 = e.value();
   x = x_init + h;
   const T y1 = e.value();
   x = x_init - h;
   const T y2 = e.value();
   x = x_init - _2h;
   const T y3 = e.value();
   x
 = x_init;

   return (-y0 + T(16) * (y1 + y2) - T(30) * y - y3) / (T(12) * h * h);
}

// Third derivative via the central stencil:
// (f(x+2h) - 2f(x+h) + 2f(x-h) - f(x-2h)) / (2h^3).
template <typename T>
inline T third_derivative(const expression<T>& e,
                          T& x,
                          const T& h = T(0.0001))
{
   const T x_init = x;
   const T _2h = T(2) * h;

   x = x_init + _2h;
   const T y0 = e.value();
   x = x_init + h;
   const T y1 = e.value();
   x = x_init - h;
   const T y2 = e.value();
   x = x_init - _2h;
   const T y3 = e.value();
   x = x_init;

   return (y0 + T(2) * (y2 - y1) - y3) / (T(2) * h * h * h);
}

// Named-variable overload of derivative(); restores the variable and returns
// NaN when the symbol table is invalid or the variable is not found.
template <typename T>
inline T derivative(const expression<T>& e,
                    const std::string& variable_name,
                    const T& h = T(0.00000001))
{
   const symbol_table<T>& sym_table = e.get_symbol_table();
   if (!sym_table.valid())
   {
      return std::numeric_limits<T>::quiet_NaN();
   }

   details::variable_node<T>* var = sym_table.get_variable(variable_name);
   if (var)
   {
      T& x = var->ref();
      T x_original = x;
      T result = derivative(e,x,h);
      x = x_original;
      return result;
   }
   else
      return std::numeric_limits<T>::quiet_NaN();
}

// Named-variable overload of second_derivative().
template <typename T>
inline T second_derivative(const expression<T>& e,
                           const std::string& variable_name,
                           const T& h = T(0.00001))
{
   const symbol_table<T>& sym_table = e.get_symbol_table();
   if (!sym_table.valid())
   {
      return std::numeric_limits<T>::quiet_NaN();
   }

   details::variable_node<T>* var = sym_table.get_variable(variable_name);
   if (var)
   {
      T& x = var->ref();
      const T x_original = x;
      const T result = second_derivative(e,x,h);
      x = x_original;
      return result;
   }
   else
      return std::numeric_limits<T>::quiet_NaN();
}

// Named-variable overload of third_derivative().
template <typename T>
inline T third_derivative(const expression<T>& e,
                          const std::string& variable_name,
                          const T& h = T(0.0001))
{
   const symbol_table<T>& sym_table = e.get_symbol_table();
   if (!sym_table.valid())
   {
      return std::numeric_limits<T>::quiet_NaN();
   }

   details::variable_node<T>* var = sym_table.get_variable(variable_name);
   if (var)
   {
      T& x = var->ref();
      const T x_original = x;
      const T result = third_derivative(e,x,h);
      x = x_original;
      return result;
   }
   else
      return std::numeric_limits<T>::quiet_NaN();
}

/* Note: The
following 'compute' routines are simple helpers, for quickly setting up the required pieces of code in order to evaluate an expression. By virtue of how they operate there will be an overhead with regards to their setup and teardown and hence should not be used in time critical sections of code. Furthermore they only assume a small sub set of variables, no string variables or user defined functions. */ template <typename T> inline bool compute(const std::string& expression_string, T& result) { // No variables symbol_table<T> symbol_table; symbol_table.add_constants(); expression<T> expression; expression.register_symbol_table(symbol_table); parser<T> parser; if (parser.compile(expression_string,expression)) { result = expression.value(); return true; } else return false; } template <typename T> inline bool compute(const std::string& expression_string, const T& x, T& result) { // Only 'x' static const std::string x_var("x"); symbol_table<T> symbol_table; symbol_table.add_constants(); symbol_table.add_constant(x_var,x); expression<T> expression; expression.register_symbol_table(symbol_table); parser<T> parser; if (parser.compile(expression_string,expression)) { result = expression.value(); return true; } else return false; } template <typename T> inline bool compute(const std::string& expression_string, const T&x, const T& y, T& result) { // Only 'x' and 'y' static const std::string x_var("x"); static const std::string y_var("y"); symbol_table<T> symbol_table; symbol_table.add_constants(); symbol_table.add_constant(x_var,x); symbol_table.add_constant(y_var,y); expression<T> expression; expression.register_symbol_table(symbol_table); parser<T> parser; if (parser.compile(expression_string,expression)) { result = expression.value(); return true; } else return false; } template <typename T> inline bool compute(const std::string& expression_string, const T& x, const T& y, const T& z, T& result) { // Only 'x', 'y' or 'z' static const std::string x_var("x"); static const 
std::string y_var("y"); static const std::string z_var("z"); symbol_table<T> symbol_table; symbol_table.add_constants(); symbol_table.add_constant(x_var,x); symbol_table.add_constant(y_var,y); symbol_table.add_constant(z_var,z); expression<T> expression; expression.register_symbol_table(symbol_table); parser<T> parser; if (parser.compile(expression_string,expression)) { result = expression.value(); return true; } else return false; } template <typename T, std::size_t N> class polynomial : public ifunction<T> { private: template <typename Type, std::size_t NumberOfCoefficients> struct poly_impl { }; template <typename Type> struct poly_impl <Type,12> { static inline T evaluate(const Type x, const Type c12, const Type c11, const Type c10, const Type c9, const Type c8, const Type c7, const Type c6, const Type c5, const Type c4, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_12x^12 + c_11x^11 + c_10x^10 + c_9x^9 + c_8x^8 + c_7x^7 + c_6x^6 + c_5x^5 + c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return ((((((((((((c12 * x + c11) * x + c10) * x + c9) * x + c8) * x + c7) * x + c6) * x + c5) * x + c4) * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,11> { static inline T evaluate(const Type x, const Type c11, const Type c10, const Type c9, const Type c8, const Type c7, const Type c6, const Type c5, const Type c4, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_11x^11 + c_10x^10 + c_9x^9 + c_8x^8 + c_7x^7 + c_6x^6 + c_5x^5 + c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return (((((((((((c11 * x + c10) * x + c9) * x + c8) * x + c7) * x + c6) * x + c5) * x + c4) * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,10> { static inline T evaluate(const Type x, const Type c10, const Type c9, const Type c8, const Type c7, const Type c6, const Type c5, const Type c4, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_10x^10 + 
c_9x^9 + c_8x^8 + c_7x^7 + c_6x^6 + c_5x^5 + c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return ((((((((((c10 * x + c9) * x + c8) * x + c7) * x + c6) * x + c5) * x + c4) * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,9> { static inline T evaluate(const Type x, const Type c9, const Type c8, const Type c7, const Type c6, const Type c5, const Type c4, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_9x^9 + c_8x^8 + c_7x^7 + c_6x^6 + c_5x^5 + c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return (((((((((c9 * x + c8) * x + c7) * x + c6) * x + c5) * x + c4) * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,8> { static inline T evaluate(const Type x, const Type c8, const Type c7, const Type c6, const Type c5, const Type c4, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_8x^8 + c_7x^7 + c_6x^6 + c_5x^5 + c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return ((((((((c8 * x + c7) * x + c6) * x + c5) * x + c4) * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,7> { static inline T evaluate(const Type x, const Type c7, const Type c6, const Type c5, const Type c4, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_7x^7 + c_6x^6 + c_5x^5 + c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return (((((((c7 * x + c6) * x + c5) * x + c4) * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,6> { static inline T evaluate(const Type x, const Type c6, const Type c5, const Type c4, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_6x^6 + c_5x^5 + c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return ((((((c6 * x + c5) * x + c4) * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,5> { static inline T evaluate(const Type x, const Type c5, const Type c4, const Type c3, const Type c2, const Type c1, 
const Type c0) { // p(x) = c_5x^5 + c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return (((((c5 * x + c4) * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,4> { static inline T evaluate(const Type x, const Type c4, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_4x^4 + c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return ((((c4 * x + c3) * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,3> { static inline T evaluate(const Type x, const Type c3, const Type c2, const Type c1, const Type c0) { // p(x) = c_3x^3 + c_2x^2 + c_1x^1 + c_0x^0 return (((c3 * x + c2) * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,2> { static inline T evaluate(const Type x, const Type c2, const Type c1, const Type c0) { // p(x) = c_2x^2 + c_1x^1 + c_0x^0 return ((c2 * x + c1) * x + c0); } }; template <typename Type> struct poly_impl <Type,1> { static inline T evaluate(const Type x, const Type c1, const Type c0) { // p(x) = c_1x^1 + c_0x^0 return (c1 * x + c0); } }; public: using ifunction<T>::operator(); polynomial() : ifunction<T>((N+2 <= 20) ? (N + 2) : std::numeric_limits<std::size_t>::max()) { disable_has_side_effects(*this); } virtual ~polynomial() {} #define poly_rtrn(NN) \ return (NN != N) ? 
std::numeric_limits<T>::quiet_NaN() : inline virtual T operator() (const T& x, const T& c1, const T& c0) { poly_rtrn(1) poly_impl<T,1>::evaluate(x,c1,c0); } inline virtual T operator() (const T& x, const T& c2, const T& c1, const T& c0) { poly_rtrn(2) poly_impl<T,2>::evaluate(x,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(3) poly_impl<T,3>::evaluate(x,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(4) poly_impl<T,4>::evaluate(x,c4,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c5, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(5) poly_impl<T,5>::evaluate(x,c5,c4,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c6, const T& c5, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(6) poly_impl<T,6>::evaluate(x,c6,c5,c4,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c7, const T& c6, const T& c5, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(7) poly_impl<T,7>::evaluate(x,c7,c6,c5,c4,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c8, const T& c7, const T& c6, const T& c5, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(8) poly_impl<T,8>::evaluate(x,c8,c7,c6,c5,c4,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c9, const T& c8, const T& c7, const T& c6, const T& c5, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(9) poly_impl<T,9>::evaluate(x,c9,c8,c7,c6,c5,c4,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c10, const T& c9, const T& c8, const T& c7, const T& c6, const T& c5, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(10) poly_impl<T,10>::evaluate(x,c10,c9,c8,c7,c6,c5,c4,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const 
T& c11, const T& c10, const T& c9, const T& c8, const T& c7, const T& c6, const T& c5, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(11) poly_impl<T,11>::evaluate(x,c11,c10,c9,c8,c7,c6,c5,c4,c3,c2,c1,c0); } inline virtual T operator() (const T& x, const T& c12, const T& c11, const T& c10, const T& c9, const T& c8, const T& c7, const T& c6, const T& c5, const T& c4, const T& c3, const T& c2, const T& c1, const T& c0) { poly_rtrn(12) poly_impl<T,12>::evaluate(x,c12,c11,c10,c9,c8,c7,c6,c5,c4,c3,c2,c1,c0); } #undef poly_rtrn inline virtual T operator() () { return std::numeric_limits<T>::quiet_NaN(); } inline virtual T operator() (const T&) { return std::numeric_limits<T>::quiet_NaN(); } inline virtual T operator() (const T&, const T&) { return std::numeric_limits<T>::quiet_NaN(); } }; template <typename T> class function_compositor { public: typedef exprtk::expression<T> expression_t; typedef exprtk::symbol_table<T> symbol_table_t; typedef exprtk::parser<T> parser_t; typedef typename parser_t::settings_store settings_t; struct function { function() {} function(const std::string& n) : name_(n) {} function(const std::string& name, const std::string& expression) : name_(name), expression_(expression) {} function(const std::string& name, const std::string& expression, const std::string& v0) : name_(name), expression_(expression) { v_.push_back(v0); } function(const std::string& name, const std::string& expression, const std::string& v0, const std::string& v1) : name_(name), expression_(expression) { v_.push_back(v0); v_.push_back(v1); } function(const std::string& name, const std::string& expression, const std::string& v0, const std::string& v1, const std::string& v2) : name_(name), expression_(expression) { v_.push_back(v0); v_.push_back(v1); v_.push_back(v2); } function(const std::string& name, const std::string& expression, const std::string& v0, const std::string& v1, const std::string& v2, const std::string& v3) : name_(name), 
expression_(expression) { v_.push_back(v0); v_.push_back(v1); v_.push_back(v2); v_.push_back(v3); } function(const std::string& name, const std::string& expression, const std::string& v0, const std::string& v1, const std::string& v2, const std::string& v3, const std::string& v4) : name_(name), expression_(expression) { v_.push_back(v0); v_.push_back(v1); v_.push_back(v2); v_.push_back(v3); v_.push_back(v4); } inline function& name(const std::string& n) { name_ = n; return (*this); } inline function& expression(const std::string& e) { expression_ = e; return (*this); } inline function& var(const std::string& v) { v_.push_back(v); return (*this); } std::string name_; std::string expression_; std::deque<std::string> v_; }; private: struct base_func : public exprtk::ifunction<T> { typedef const T& type; typedef exprtk::ifunction<T> function_t; typedef std::vector<T*> varref_t; typedef std::vector<T> var_t; typedef std::pair<T*,std::size_t> lvarref_t; typedef std::vector<lvarref_t> lvr_vec_t; using exprtk::ifunction<T>::operator(); base_func(const std::size_t& pc = 0) : exprtk::ifunction<T>(pc), local_var_stack_size(0), stack_depth(0) { v.resize(pc); } virtual ~base_func() {} inline void update(const T& v0) { (*v[0]) = v0; } inline void update(const T& v0, const T& v1) { (*v[0]) = v0; (*v[1]) = v1; } inline void update(const T& v0, const T& v1, const T& v2) { (*v[0]) = v0; (*v[1]) = v1; (*v[2]) = v2; } inline void update(const T& v0, const T& v1, const T& v2, const T& v3) { (*v[0]) = v0; (*v[1]) = v1; (*v[2]) = v2; (*v[3]) = v3; } inline void update(const T& v0, const T& v1, const T& v2, const T& v3, const T& v4) { (*v[0]) = v0; (*v[1]) = v1; (*v[2]) = v2; (*v[3]) = v3; (*v[4]) = v4; } inline void update(const T& v0, const T& v1, const T& v2, const T& v3, const T& v4, const T& v5) { (*v[0]) = v0; (*v[1]) = v1; (*v[2]) = v2; (*v[3]) = v3; (*v[4]) = v4; (*v[5]) = v5; } inline function_t& setup(expression_t& expr) { expression = expr; typedef typename 
expression_t::control_block::local_data_list_t ldl_t; ldl_t ldl = expr.local_data_list(); std::vector<std::size_t> index_list; for (std::size_t i = 0; i < ldl.size(); ++i) { if (ldl[i].size) { index_list.push_back(i); } } std::size_t input_param_count = 0; for (std::size_t i = 0; i < index_list.size(); ++i) { const std::size_t index = index_list[i]; if (i < (index_list.size() - v.size())) { lv.push_back( std::make_pair( reinterpret_cast<T*>(ldl[index].pointer), ldl[index].size)); local_var_stack_size += ldl[index].size; } else v[input_param_count++] = reinterpret_cast<T*>(ldl[index].pointer); } clear_stack(); return (*this); } inline void pre() { if (stack_depth++) { if (!v.empty()) { var_t var_stack(v.size(),T(0)); copy(v,var_stack); param_stack.push_back(var_stack); } if (!lv.empty()) { var_t local_var_stack(local_var_stack_size,T(0)); copy(lv,local_var_stack); local_stack.push_back(local_var_stack); } } } inline void post() { if (--stack_depth) { if (!v.empty()) { copy(param_stack.back(),v); param_stack.pop_back(); } if (!lv.empty()) { copy(local_stack.back(),lv); local_stack.pop_back(); } } } void copy(const varref_t& src_v, var_t& dest_v) { for (std::size_t i = 0; i < src_v.size(); ++i) { dest_v[i] = (*src_v[i]); } } void copy(const var_t& src_v, varref_t& dest_v) { for (std::size_t i = 0; i < src_v.size(); ++i) { (*dest_v[i]) = src_v[i]; } } void copy(const lvr_vec_t& src_v, var_t& dest_v) { typename var_t::iterator itr = dest_v.begin(); typedef typename std::iterator_traits<typename var_t::iterator>::difference_type diff_t; for (std::size_t i = 0; i < src_v.size(); ++i) { lvarref_t vr = src_v[i]; if (1 == vr.second) *itr++ = (*vr.first); else { std::copy(vr.first, vr.first + vr.second, itr); itr += static_cast<diff_t>(vr.second); } } } void copy(const var_t& src_v, lvr_vec_t& dest_v) { typename var_t::const_iterator itr = src_v.begin(); typedef typename std::iterator_traits<typename var_t::iterator>::difference_type diff_t; for (std::size_t i = 0; i < 
src_v.size(); ++i) { lvarref_t vr = dest_v[i]; if (1 == vr.second) (*vr.first) = *itr++; else { std::copy(itr, itr + static_cast<diff_t>(vr.second), vr.first); itr += static_cast<diff_t>(vr.second); } } } inline void clear_stack() { for (std::size_t i = 0; i < v.size(); ++i) { (*v[i]) = 0; } } inline virtual T value(expression_t& e) { return e.value(); } expression_t expression; varref_t v; lvr_vec_t lv; std::size_t local_var_stack_size; std::size_t stack_depth; std::deque<var_t> param_stack; std::deque<var_t> local_stack; }; typedef std::map<std::string,base_func*> funcparam_t; struct func_0param : public base_func { using exprtk::ifunction<T>::operator(); func_0param() : base_func(0) {} inline T operator() () { return this->value(base_func::expression); } }; typedef const T& type; template <typename BaseFuncType> struct scoped_bft { scoped_bft(BaseFuncType& bft) : bft_(bft) { bft_.pre (); } ~scoped_bft() { bft_.post(); } BaseFuncType& bft_; private: scoped_bft(scoped_bft&); scoped_bft& operator=(scoped_bft&); }; struct func_1param : public base_func { using exprtk::ifunction<T>::operator(); func_1param() : base_func(1) {} inline T operator() (type v0) { scoped_bft<func_1param> sb(*this); base_func::update(v0); return this->value(base_func::expression); } }; struct func_2param : public base_func { using exprtk::ifunction<T>::operator(); func_2param() : base_func(2) {} inline T operator() (type v0, type v1) { scoped_bft<func_2param> sb(*this); base_func::update(v0, v1); return this->value(base_func::expression); } }; struct func_3param : public base_func { using exprtk::ifunction<T>::operator(); func_3param() : base_func(3) {} inline T operator() (type v0, type v1, type v2) { scoped_bft<func_3param> sb(*this); base_func::update(v0, v1, v2); return this->value(base_func::expression); } }; struct func_4param : public base_func { using exprtk::ifunction<T>::operator(); func_4param() : base_func(4) {} inline T operator() (type v0, type v1, type v2, type v3) { 
scoped_bft<func_4param> sb(*this); base_func::update(v0, v1, v2, v3); return this->value(base_func::expression); } }; struct func_5param : public base_func { using exprtk::ifunction<T>::operator(); func_5param() : base_func(5) {} inline T operator() (type v0, type v1, type v2, type v3, type v4) { scoped_bft<func_5param> sb(*this); base_func::update(v0, v1, v2, v3, v4); return this->value(base_func::expression); } }; struct func_6param : public base_func { using exprtk::ifunction<T>::operator(); func_6param() : base_func(6) {} inline T operator() (type v0, type v1, type v2, type v3, type v4, type v5) { scoped_bft<func_6param> sb(*this); base_func::update(v0, v1, v2, v3, v4, v5); return this->value(base_func::expression); } }; static T return_value(expression_t& e) { typedef exprtk::results_context<T> results_context_t; typedef typename results_context_t::type_store_t type_t; typedef typename type_t::scalar_view scalar_t; T result = e.value(); if (e.return_invoked()) { // Due to the post compilation checks, it can be safely // assumed that there will be at least one parameter // and that the first parameter will always be scalar. 
return scalar_t(e.results()[0])(); } return result; } #define def_fp_retval(N) \ struct func_##N##param_retval : public func_##N##param \ { \ inline T value(expression_t& e) \ { \ return return_value(e); \ } \ }; \ def_fp_retval(0) def_fp_retval(1) def_fp_retval(2) def_fp_retval(3) def_fp_retval(4) def_fp_retval(5) def_fp_retval(6) template <typename Allocator, template <typename,typename> class Sequence> inline bool add(const std::string& name, const std::string& expression, const Sequence<std::string,Allocator>& var_list, const bool override = false) { const typename std::map<std::string,expression_t>::iterator itr = expr_map_.find(name); if (expr_map_.end() != itr) { if (!override) { exprtk_debug(("Compositor error(add): function '%s' already defined\n", name.c_str())); return false; } remove(name, var_list.size()); } if (compile_expression(name,expression,var_list)) { const std::size_t n = var_list.size(); fp_map_[n][name]->setup(expr_map_[name]); return true; } else { exprtk_debug(("Compositor error(add): Failed to compile function '%s'\n", name.c_str())); return false; } } public: function_compositor() : parser_(settings_t::compile_all_opts + settings_t::e_disable_zero_return), fp_map_(7) {} function_compositor(const symbol_table_t& st) : symbol_table_(st), parser_(settings_t::compile_all_opts + settings_t::e_disable_zero_return), fp_map_(7) {} ~function_compositor() { clear(); } inline symbol_table_t& symbol_table() { return symbol_table_; } inline void add_auxiliary_symtab(symbol_table_t& symtab) { auxiliary_symtab_list_.push_back(&symtab); } void clear() { symbol_table_.clear(); expr_map_ .clear(); for (std::size_t i = 0; i < fp_map_.size(); ++i) { typename funcparam_t::iterator itr = fp_map_[i].begin(); typename funcparam_t::iterator end = fp_map_[i].end (); while (itr != end) { delete itr->second; ++itr; } fp_map_[i].clear(); } } inline bool add(const function& f, const bool override = false) { return add(f.name_, f.expression_, f.v_,override); } 
private: template <typename Allocator, template <typename,typename> class Sequence> bool compile_expression(const std::string& name, const std::string& expression, const Sequence<std::string,Allocator>& input_var_list, bool return_present = false) { expression_t compiled_expression; symbol_table_t local_symbol_table; local_symbol_table.load_from(symbol_table_); local_symbol_table.add_constants(); if (!valid(name,input_var_list.size())) return false; if (!forward(name, input_var_list.size(), local_symbol_table, return_present)) return false; compiled_expression.register_symbol_table(local_symbol_table); for (std::size_t i = 0; i < auxiliary_symtab_list_.size(); ++i) { compiled_expression.register_symbol_table((*auxiliary_symtab_list_[i])); } std::string mod_expression; for (std::size_t i = 0; i < input_var_list.size(); ++i) { mod_expression += " var " + input_var_list[i] + "{};\n"; } if ( ('{' == details::front(expression)) && ('}' == details::back (expression)) ) mod_expression += "~" + expression + ";"; else mod_expression += "~{" + expression + "};"; if (!parser_.compile(mod_expression,compiled_expression)) { exprtk_debug(("Compositor Error: %s\n",parser_.error().c_str())); exprtk_debug(("Compositor modified expression: \n%s\n",mod_expression.c_str())); remove(name,input_var_list.size()); return false; } if (!return_present && parser_.dec().return_present()) { remove(name,input_var_list.size()); return compile_expression(name, expression, input_var_list, true); } // Make sure every return point has a scalar as its first parameter if (parser_.dec().return_present()) { typedef std::vector<std::string> str_list_t; str_list_t ret_param_list = parser_.dec().return_param_type_list(); for (std::size_t i = 0; i < ret_param_list.size(); ++i) { const std::string& params = ret_param_list[i]; if (params.empty() || ('T' != params[0])) { exprtk_debug(("Compositor Error: Return statement in function '%s' is invalid\n", name.c_str())); remove(name,input_var_list.size()); return 
false; } } } expr_map_[name] = compiled_expression; exprtk::ifunction<T>& ifunc = (*(fp_map_[input_var_list.size()])[name]); if (symbol_table_.add_function(name,ifunc)) return true; else { exprtk_debug(("Compositor Error: Failed to add function '%s' to symbol table\n", name.c_str())); return false; } } inline bool symbol_used(const std::string& symbol) const { return ( symbol_table_.is_variable (symbol) || symbol_table_.is_stringvar (symbol) || symbol_table_.is_function (symbol) || symbol_table_.is_vector (symbol) || symbol_table_.is_vararg_function(symbol) ); } inline bool valid(const std::string& name, const std::size_t& arg_count) const { if (arg_count > 6) return false; else if (symbol_used(name)) return false; else if (fp_map_[arg_count].end() != fp_map_[arg_count].find(name)) return false; else return true; } inline bool forward(const std::string& name, const std::size_t& arg_count, symbol_table_t& sym_table, const bool ret_present = false) { switch (arg_count) { #define case_stmt(N) \ case N : (fp_map_[arg_count])[name] = \ (!ret_present) ? 
static_cast<base_func*> \ (new func_##N##param) : \ static_cast<base_func*> \ (new func_##N##param_retval) ; \ break; \ case_stmt(0) case_stmt(1) case_stmt(2) case_stmt(3) case_stmt(4) case_stmt(5) case_stmt(6) #undef case_stmt } exprtk::ifunction<T>& ifunc = (*(fp_map_[arg_count])[name]); return sym_table.add_function(name,ifunc); } inline void remove(const std::string& name, const std::size_t& arg_count) { if (arg_count > 6) return; const typename std::map<std::string,expression_t>::iterator em_itr = expr_map_.find(name); if (expr_map_.end() != em_itr) { expr_map_.erase(em_itr); } const typename funcparam_t::iterator fp_itr = fp_map_[arg_count].find(name); if (fp_map_[arg_count].end() != fp_itr) { delete fp_itr->second; fp_map_[arg_count].erase(fp_itr); } symbol_table_.remove_function(name); } private: symbol_table_t symbol_table_; parser_t parser_; std::map<std::string,expression_t> expr_map_; std::vector<funcparam_t> fp_map_; std::vector<symbol_table_t*> auxiliary_symtab_list_; }; template <typename T> inline bool pgo_primer() { static const std::string expression_list[] = { "(y + x)", "2 * (y + x)", "(2 * y + 2 * x)", "(y + x / y) * (x - y / x)", "x / ((x + y) * (x - y)) / y", "1 - ((x * y) + (y / x)) - 3", "sin(2 * x) + cos(pi / y)", "1 - sin(2 * x) + cos(pi / y)", "sqrt(1 - sin(2 * x) + cos(pi / y) / 3)", "(x^2 / sin(2 * pi / y)) -x / 2", "x + (cos(y - sin(2 / x * pi)) - sin(x - cos(2 * y / pi))) - y", "clamp(-1.0, sin(2 * pi * x) + cos(y / 2 * pi), +1.0)", "iclamp(-1.0, sin(2 * pi * x) + cos(y / 2 * pi), +1.0)", "max(3.33, min(sqrt(1 - sin(2 * x) + cos(pi / y) / 3), 1.11))", "if(avg(x,y) <= x + y, x - y, x * y) + 2 * pi / x", "1.1x^1 + 2.2y^2 - 3.3x^3 + 4.4y^4 - 5.5x^5 + 6.6y^6 - 7.7x^27 + 8.8y^55", "(yy + xx)", "2 * (yy + xx)", "(2 * yy + 2 * xx)", "(yy + xx / yy) * (xx - yy / xx)", "xx / ((xx + yy) * (xx - yy)) / yy", "1 - ((xx * yy) + (yy / xx)) - 3", "sin(2 * xx) + cos(pi / yy)", "1 - sin(2 * xx) + cos(pi / yy)", "sqrt(1 - sin(2 * xx) + cos(pi / yy) / 
3)", "(xx^2 / sin(2 * pi / yy)) -xx / 2", "xx + (cos(yy - sin(2 / xx * pi)) - sin(xx - cos(2 * yy / pi))) - yy", "clamp(-1.0, sin(2 * pi * xx) + cos(yy / 2 * pi), +1.0)", "max(3.33, min(sqrt(1 - sin(2 * xx) + cos(pi / yy) / 3), 1.11))", "if(avg(xx,yy) <= xx + yy, xx - yy, xx * yy) + 2 * pi / xx", "1.1xx^1 + 2.2yy^2 - 3.3xx^3 + 4.4yy^4 - 5.5xx^5 + 6.6yy^6 - 7.7xx^27 + 8.8yy^55", "(1.1*(2.2*(3.3*(4.4*(5.5*(6.6*(7.7*(8.8*(9.9+x)))))))))", "(((((((((x+9.9)*8.8)*7.7)*6.6)*5.5)*4.4)*3.3)*2.2)*1.1)", "(x + y) * z", "x + (y * z)", "(x + y) * 7", "x + (y * 7)", "(x + 7) * y", "x + (7 * y)", "(7 + x) * y", "7 + (x * y)", "(2 + x) * 3", "2 + (x * 3)", "(2 + 3) * x", "2 + (3 * x)", "(x + 2) * 3", "x + (2 * 3)", "(x + y) * (z / w)", "(x + y) * (z / 7)", "(x + y) * (7 / z)", "(x + 7) * (y / z)", "(7 + x) * (y / z)", "(2 + x) * (y / z)", "(x + 2) * (y / 3)", "(2 + x) * (y / 3)", "(x + 2) * (3 / y)", "x + (y * (z / w))", "x + (y * (z / 7))", "x + (y * (7 / z))", "x + (7 * (y / z))", "7 + (x * (y / z))", "2 + (x * (3 / y))", "x + (2 * (y / 4))", "2 + (x * (y / 3))", "x + (2 * (3 / y))", "x + ((y * z) / w)", "x + ((y * z) / 7)", "x + ((y * 7) / z)", "x + ((7 * y) / z)", "7 + ((y * z) / w)", "2 + ((x * 3) / y)", "x + ((2 * y) / 3)", "2 + ((x * y) / 3)", "x + ((2 * 3) / y)", "(((x + y) * z) / w)", "(((x + y) * z) / 7)", "(((x + y) * 7) / z)", "(((x + 7) * y) / z)", "(((7 + x) * y) / z)", "(((2 + x) * 3) / y)", "(((x + 2) * y) / 3)", "(((2 + x) * y) / 3)", "(((x + 2) * 3) / y)", "((x + (y * z)) / w)", "((x + (y * z)) / 7)", "((x + (y * 7)) / y)", "((x + (7 * y)) / z)", "((7 + (x * y)) / z)", "((2 + (x * 3)) / y)", "((x + (2 * y)) / 3)", "((2 + (x * y)) / 3)", "((x + (2 * 3)) / y)", "(xx + yy) * zz", "xx + (yy * zz)", "(xx + yy) * 7", "xx + (yy * 7)", "(xx + 7) * yy", "xx + (7 * yy)", "(7 + xx) * yy", "7 + (xx * yy)", "(2 + x) * 3", "2 + (x * 3)", "(2 + 3) * x", "2 + (3 * x)", "(x + 2) * 3", "x + (2 * 3)", "(xx + yy) * (zz / ww)", "(xx + yy) * (zz / 7)", "(xx + yy) * (7 / zz)", "(xx + 
7) * (yy / zz)", "(7 + xx) * (yy / zz)", "(2 + xx) * (yy / zz)", "(xx + 2) * (yy / 3)", "(2 + xx) * (yy / 3)", "(xx + 2) * (3 / yy)", "xx + (yy * (zz / ww))", "xx + (yy * (zz / 7))", "xx + (yy * (7 / zz))", "xx + (7 * (yy / zz))", "7 + (xx * (yy / zz))", "2 + (xx * (3 / yy))", "xx + (2 * (yy / 4))", "2 + (xx * (yy / 3))", "xx + (2 * (3 / yy))", "xx + ((yy * zz) / ww)", "xx + ((yy * zz) / 7)", "xx + ((yy * 7) / zz)", "xx + ((7 * yy) / zz)", "7 + ((yy * zz) / ww)", "2 + ((xx * 3) / yy)", "xx + ((2 * yy) / 3)", "2 + ((xx * yy) / 3)", "xx + ((2 * 3) / yy)", "(((xx + yy) * zz) / ww)", "(((xx + yy) * zz) / 7)", "(((xx + yy) * 7) / zz)", "(((xx + 7) * yy) / zz)", "(((7 + xx) * yy) / zz)", "(((2 + xx) * 3) / yy)", "(((xx + 2) * yy) / 3)", "(((2 + xx) * yy) / 3)", "(((xx + 2) * 3) / yy)", "((xx + (yy * zz)) / ww)", "((xx + (yy * zz)) / 7)", "((xx + (yy * 7)) / yy)", "((xx + (7 * yy)) / zz)", "((7 + (xx * yy)) / zz)", "((2 + (xx * 3)) / yy)", "((xx + (2 * yy)) / 3)", "((2 + (xx * yy)) / 3)", "((xx + (2 * 3)) / yy)" }; static const std::size_t expression_list_size = sizeof(expression_list) / sizeof(std::string); T x = T(0); T y = T(0); T z = T(0); T w = T(0); T xx = T(0); T yy = T(0); T zz = T(0); T ww = T(0); exprtk::symbol_table<T> symbol_table; symbol_table.add_constants(); symbol_table.add_variable( "x", x); symbol_table.add_variable( "y", y); symbol_table.add_variable( "z", z); symbol_table.add_variable( "w", w); symbol_table.add_variable("xx",xx); symbol_table.add_variable("yy",yy); symbol_table.add_variable("zz",zz); symbol_table.add_variable("ww",ww); typedef typename std::deque<exprtk::expression<T> > expr_list_t; expr_list_t expr_list; const std::size_t rounds = 50; { for (std::size_t r = 0; r < rounds; ++r) { expr_list.clear(); exprtk::parser<T> parser; for (std::size_t i = 0; i < expression_list_size; ++i) { exprtk::expression<T> expression; expression.register_symbol_table(symbol_table); if (!parser.compile(expression_list[i],expression)) { return false; } 
expr_list.push_back(expression); } } } struct execute { static inline T process(T& x, T& y, expression<T>& expression) { static const T lower_bound = T(-20); static const T upper_bound = T(+20); static const T delta = T(0.1); T total = T(0); for (x = lower_bound; x <= upper_bound; x += delta) { for (y = lower_bound; y <= upper_bound; y += delta) { total += expression.value(); } } return total; } }; for (std::size_t i = 0; i < expr_list.size(); ++i) { execute::process( x, y, expr_list[i]); execute::process(xx, yy, expr_list[i]); } { for (std::size_t i = 0; i < 10000; ++i) { const T v = T(123.456 + i); if (details::is_true(details::numeric::nequal(details::numeric::fast_exp<T, 1>::result(v),details::numeric::pow(v,T( 1))))) return false; #define else_stmt(N) \ else if (details::is_true(details::numeric::nequal(details::numeric::fast_exp<T,N>::result(v),details::numeric::pow(v,T(N))))) \ return false; \ else_stmt( 2) else_stmt( 3) else_stmt( 4) else_stmt( 5) else_stmt( 6) else_stmt( 7) else_stmt( 8) else_stmt( 9) else_stmt(10) else_stmt(11) else_stmt(12) else_stmt(13) else_stmt(14) else_stmt(15) else_stmt(16) else_stmt(17) else_stmt(18) else_stmt(19) else_stmt(20) else_stmt(21) else_stmt(22) else_stmt(23) else_stmt(24) else_stmt(25) else_stmt(26) else_stmt(27) else_stmt(28) else_stmt(29) else_stmt(30) else_stmt(31) else_stmt(32) else_stmt(33) else_stmt(34) else_stmt(35) else_stmt(36) else_stmt(37) else_stmt(38) else_stmt(39) else_stmt(40) else_stmt(41) else_stmt(42) else_stmt(43) else_stmt(44) else_stmt(45) else_stmt(46) else_stmt(47) else_stmt(48) else_stmt(49) else_stmt(50) else_stmt(51) else_stmt(52) else_stmt(53) else_stmt(54) else_stmt(55) else_stmt(56) else_stmt(57) else_stmt(58) else_stmt(59) else_stmt(60) else_stmt(61) } } return true; } } #if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) # ifndef NOMINMAX # define NOMINMAX # endif # ifndef WIN32_LEAN_AND_MEAN # define WIN32_LEAN_AND_MEAN # endif # include <windows.h> # include <ctime> #else # 
include <ctime> # include <sys/time.h> # include <sys/types.h> #endif namespace exprtk { class timer { public: #if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) timer() : in_use_(false) { QueryPerformanceFrequency(&clock_frequency_); } inline void start() { in_use_ = true; QueryPerformanceCounter(&start_time_); } inline void stop() { QueryPerformanceCounter(&stop_time_); in_use_ = false; } inline double time() const { return (1.0 * (stop_time_.QuadPart - start_time_.QuadPart)) / (1.0 * clock_frequency_.QuadPart); } #else timer() : in_use_(false) { start_time_.tv_sec = 0; start_time_.tv_usec = 0; stop_time_.tv_sec = 0; stop_time_.tv_usec = 0; } inline void start() { in_use_ = true; gettimeofday(&start_time_,0); } inline void stop() { gettimeofday(&stop_time_, 0); in_use_ = false; } inline unsigned long long int usec_time() const { if (!in_use_) { if (stop_time_.tv_sec >= start_time_.tv_sec) { return 1000000LLU * static_cast<unsigned long long int>(stop_time_.tv_sec - start_time_.tv_sec ) + static_cast<unsigned long long int>(stop_time_.tv_usec - start_time_.tv_usec) ; } else return std::numeric_limits<unsigned long long int>::max(); } else return std::numeric_limits<unsigned long long int>::max(); } inline double time() const { return usec_time() * 0.000001; } #endif inline bool in_use() const { return in_use_; } private: bool in_use_; #if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) LARGE_INTEGER start_time_; LARGE_INTEGER stop_time_; LARGE_INTEGER clock_frequency_; #else struct timeval start_time_; struct timeval stop_time_; #endif }; } // namespace exprtk #ifndef exprtk_disable_rtl_io namespace exprtk { namespace rtl { namespace io { namespace details { template <typename T> inline void print_type(const std::string& fmt, const T v, exprtk::details::numeric::details::real_type_tag) { printf(fmt.c_str(),v); } template <typename T> struct print_impl { typedef typename igeneric_function<T>::generic_type generic_type; typedef typename 
igeneric_function<T>::parameter_list_t parameter_list_t; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; typedef typename generic_type::string_view string_t; typedef typename exprtk::details::numeric::details::number_type<T>::type num_type; static void process(const std::string& scalar_format, parameter_list_t parameters) { for (std::size_t i = 0; i < parameters.size(); ++i) { generic_type& gt = parameters[i]; switch (gt.type) { case generic_type::e_scalar : print(scalar_format,scalar_t(gt)); break; case generic_type::e_vector : print(scalar_format,vector_t(gt)); break; case generic_type::e_string : print(string_t(gt)); break; default : continue; } } } static inline void print(const std::string& scalar_format, const scalar_t& s) { print_type(scalar_format,s(),num_type()); } static inline void print(const std::string& scalar_format, const vector_t& v) { for (std::size_t i = 0; i < v.size(); ++i) { print_type(scalar_format,v[i],num_type()); if ((i + 1) < v.size()) printf(" "); } } static inline void print(const string_t& s) { printf("%s",to_str(s).c_str()); } }; } // namespace exprtk::rtl::io::details template <typename T> struct print : public exprtk::igeneric_function<T> { typedef typename igeneric_function<T>::parameter_list_t parameter_list_t; using exprtk::igeneric_function<T>::operator(); print(const std::string& scalar_format = "%10.5f") : scalar_format_(scalar_format) { exprtk::enable_zero_parameters(*this); } inline T operator() (parameter_list_t parameters) { details::print_impl<T>::process(scalar_format_,parameters); return T(0); } std::string scalar_format_; }; template <typename T> struct println : public exprtk::igeneric_function<T> { typedef typename igeneric_function<T>::parameter_list_t parameter_list_t; using exprtk::igeneric_function<T>::operator(); println(const std::string& scalar_format = "%10.5f") : scalar_format_(scalar_format) { exprtk::enable_zero_parameters(*this); } inline T 
operator() (parameter_list_t parameters) { details::print_impl<T>::process(scalar_format_,parameters); printf("\n"); return T(0); } std::string scalar_format_; }; template <typename T> struct package { print <T> p; println<T> pl; bool register_package(exprtk::symbol_table<T>& symtab) { #define exprtk_register_function(FunctionName,FunctionType) \ if (!symtab.add_function(FunctionName,FunctionType)) \ { \ exprtk_debug(( \ "exprtk::rtl::io::register_package - Failed to add function: %s\n", \ FunctionName)); \ return false; \ } \ exprtk_register_function("print" , p) exprtk_register_function("println", pl) #undef exprtk_register_function return true; } }; } // namespace exprtk::rtl::io } // namespace exprtk::rtl } // namespace exprtk #endif #ifndef exprtk_disable_rtl_io_file #include <fstream> namespace exprtk { namespace rtl { namespace io { namespace file { namespace details { enum file_mode { e_error = 0, e_read = 1, e_write = 2, e_rdwrt = 4 }; struct file_descriptor { file_descriptor(const std::string& fname, const std::string& access) : stream_ptr(0), mode(get_file_mode(access)), file_name(fname) {} void* stream_ptr; file_mode mode; std::string file_name; bool open() { if (e_read == mode) { std::ifstream* stream = new std::ifstream(file_name.c_str(),std::ios::binary); if (!(*stream)) { file_name.clear(); delete stream; return false; } else stream_ptr = stream; return true; } else if (e_write == mode) { std::ofstream* stream = new std::ofstream(file_name.c_str(),std::ios::binary); if (!(*stream)) { file_name.clear(); delete stream; return false; } else stream_ptr = stream; return true; } else if (e_rdwrt == mode) { std::fstream* stream = new std::fstream(file_name.c_str(),std::ios::binary); if (!(*stream)) { file_name.clear(); delete stream; return false; } else stream_ptr = stream; return true; } else return false; } template <typename Stream, typename Ptr> void close(Ptr& p) { Stream* stream = reinterpret_cast<Stream*>(p); stream->close(); delete stream; p = 
reinterpret_cast<Ptr>(0); } bool close() { switch (mode) { case e_read : close<std::ifstream>(stream_ptr); break; case e_write : close<std::ofstream>(stream_ptr); break; case e_rdwrt : close<std::fstream> (stream_ptr); break; default : return false; } return true; } template <typename View> bool write(const View& view, const std::size_t amount, const std::size_t offset = 0) { switch (mode) { case e_write : reinterpret_cast<std::ofstream*>(stream_ptr)-> write(reinterpret_cast<const char*>(view.begin() + offset), amount * sizeof(typename View::value_t)); break; case e_rdwrt : reinterpret_cast<std::fstream*>(stream_ptr)-> write(reinterpret_cast<const char*>(view.begin() + offset) , amount * sizeof(typename View::value_t)); break; default : return false; } return true; } template <typename View> bool read(View& view, const std::size_t amount, const std::size_t offset = 0) { switch (mode) { case e_read : reinterpret_cast<std::ifstream*>(stream_ptr)-> read(reinterpret_cast<char*>(view.begin() + offset), amount * sizeof(typename View::value_t)); break; case e_rdwrt : reinterpret_cast<std::fstream*>(stream_ptr)-> read(reinterpret_cast<char*>(view.begin() + offset) , amount * sizeof(typename View::value_t)); break; default : return false; } return true; } bool getline(std::string& s) { switch (mode) { case e_read : return (!!std::getline(*reinterpret_cast<std::ifstream*>(stream_ptr),s)); case e_rdwrt : return (!!std::getline(*reinterpret_cast<std::fstream* >(stream_ptr),s)); default : return false; } } bool eof() { switch (mode) { case e_read : return reinterpret_cast<std::ifstream*>(stream_ptr)->eof(); case e_write : return reinterpret_cast<std::ofstream*>(stream_ptr)->eof(); case e_rdwrt : return reinterpret_cast<std::fstream* >(stream_ptr)->eof(); default : return true; } } file_mode get_file_mode(const std::string& access) { if (access.empty() || access.size() > 2) return e_error; std::size_t w_cnt = 0; std::size_t r_cnt = 0; for (std::size_t i = 0; i < access.size(); 
++i) { switch (std::tolower(access[i])) { case 'r' : r_cnt++; break; case 'w' : w_cnt++; break; default : return e_error; } } if ((0 == r_cnt) && (0 == w_cnt)) return e_error; else if ((r_cnt > 1) || (w_cnt > 1)) return e_error; else if ((1 == r_cnt) && (1 == w_cnt)) return e_rdwrt; else if (1 == r_cnt) return e_read; else return e_write; } }; template <typename T> file_descriptor* make_handle(T v) { file_descriptor* fd = reinterpret_cast<file_descriptor*>(0); std::memcpy(reinterpret_cast<char*>(&fd), reinterpret_cast<const char*>(&v), sizeof(fd)); return fd; } template <typename T> void perform_check() { #ifdef _MSC_VER #pragma warning(push) #pragma warning(disable: 4127) #endif if (sizeof(T) < sizeof(void*)) { throw std::runtime_error("exprtk::rtl::io::file - Error - pointer size larger than holder."); } #ifdef _MSC_VER #pragma warning(pop) #endif } } // namespace exprtk::rtl::io::file::details template <typename T> class open : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::string_view string_t; using exprtk::igeneric_function<T>::operator(); open() : exprtk::igeneric_function<T>("S|SS") { details::perform_check<T>(); } inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { std::string file_name; std::string access; file_name = to_str(string_t(parameters[0])); if (file_name.empty()) return T(0); if (0 == ps_index) access = "r"; else if (0 == string_t(parameters[1]).size()) return T(0); else access = to_str(string_t(parameters[1])); details::file_descriptor* fd = new details::file_descriptor(file_name,access); if (fd->open()) { T t = T(0); std::memcpy(reinterpret_cast<char*>(&t ), reinterpret_cast<char*>(&fd), sizeof(fd)); return t; } else { delete fd; return T(0); } } }; template <typename T> struct close : public exprtk::ifunction<T> { 
using exprtk::ifunction<T>::operator(); close() : exprtk::ifunction<T>(1) { details::perform_check<T>(); } inline T operator() (const T& v) { details::file_descriptor* fd = details::make_handle(v); if (!fd->close()) return T(0); delete fd; return T(1); } }; template <typename T> class write : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::string_view string_t; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); write() : igfun_t("TS|TST|TV|TVT") { details::perform_check<T>(); } inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { details::file_descriptor* fd = details::make_handle(scalar_t(parameters[0])()); std::size_t amount = 0; switch (ps_index) { case 0 : { const string_t buffer(parameters[1]); amount = buffer.size(); return T(fd->write(buffer,amount) ? 1 : 0); } case 1 : { const string_t buffer(parameters[1]); amount = std::min(buffer.size(), static_cast<std::size_t>(scalar_t(parameters[2])())); return T(fd->write(buffer,amount) ? 1 : 0); } case 2 : { const vector_t vec(parameters[1]); amount = vec.size(); return T(fd->write(vec,amount) ? 1 : 0); } case 3 : { const vector_t vec(parameters[1]); amount = std::min(vec.size(), static_cast<std::size_t>(scalar_t(parameters[2])())); return T(fd->write(vec,amount) ? 
1 : 0); } } return T(0); } }; template <typename T> class read : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::string_view string_t; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); read() : igfun_t("TS|TST|TV|TVT") { details::perform_check<T>(); } inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { details::file_descriptor* fd = details::make_handle(scalar_t(parameters[0])()); std::size_t amount = 0; switch (ps_index) { case 0 : { string_t buffer(parameters[1]); amount = buffer.size(); return T(fd->read(buffer,amount) ? 1 : 0); } case 1 : { string_t buffer(parameters[1]); amount = std::min(buffer.size(), static_cast<std::size_t>(scalar_t(parameters[2])())); return T(fd->read(buffer,amount) ? 1 : 0); } case 2 : { vector_t vec(parameters[1]); amount = vec.size(); return T(fd->read(vec,amount) ? 1 : 0); } case 3 : { vector_t vec(parameters[1]); amount = std::min(vec.size(), static_cast<std::size_t>(scalar_t(parameters[2])())); return T(fd->read(vec,amount) ? 
1 : 0); } } return T(0); } }; template <typename T> class getline : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::string_view string_t; typedef typename generic_type::scalar_view scalar_t; using exprtk::igeneric_function<T>::operator(); getline() : igfun_t("T",igfun_t::e_rtrn_string) { details::perform_check<T>(); } inline T operator() (std::string& result, parameter_list_t parameters) { details::file_descriptor* fd = details::make_handle(scalar_t(parameters[0])()); return T(fd->getline(result) ? 1 : 0); } }; template <typename T> struct eof : public exprtk::ifunction<T> { using exprtk::ifunction<T>::operator(); eof() : exprtk::ifunction<T>(1) { details::perform_check<T>(); } inline T operator() (const T& v) { details::file_descriptor* fd = details::make_handle(v); return (fd->eof() ? T(1) : T(0)); } }; template <typename T> struct package { open <T> o; close <T> c; write <T> w; read <T> r; getline<T> g; eof <T> e; bool register_package(exprtk::symbol_table<T>& symtab) { #define exprtk_register_function(FunctionName,FunctionType) \ if (!symtab.add_function(FunctionName,FunctionType)) \ { \ exprtk_debug(( \ "exprtk::rtl::io::file::register_package - Failed to add function: %s\n", \ FunctionName)); \ return false; \ } \ exprtk_register_function("open" ,o) exprtk_register_function("close" ,c) exprtk_register_function("write" ,w) exprtk_register_function("read" ,r) exprtk_register_function("getline",g) exprtk_register_function("eof" ,e) #undef exprtk_register_function return true; } }; } // namespace exprtk::rtl::io::file } // namespace exprtk::rtl::io } // namespace exprtk::rtl } // namespace exprtk #endif #ifndef exprtk_disable_rtl_vecops namespace exprtk { namespace rtl { namespace vecops { namespace helper { template <typename Vector> inline bool invalid_range(const 
Vector& v, const std::size_t r0, const std::size_t r1) { if (r0 > (v.size() - 1)) return true; else if (r1 > (v.size() - 1)) return true; else if (r1 < r0) return true; else return false; } template <typename T> struct load_vector_range { typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; static inline bool process(parameter_list_t& parameters, std::size_t& r0, std::size_t& r1, const std::size_t& r0_prmidx, const std::size_t& r1_prmidx, const std::size_t vec_idx = 0) { if (r0_prmidx >= parameters.size()) return false; if (r1_prmidx >= parameters.size()) return false; if (!scalar_t(parameters[r0_prmidx]).to_uint(r0)) return false; if (!scalar_t(parameters[r1_prmidx]).to_uint(r1)) return false; return !invalid_range(vector_t(parameters[vec_idx]), r0, r1); } }; } namespace details { template <typename T> inline void kahan_sum(T& sum, T& error, T v) { T x = v - error; T y = sum + x; error = (y - sum) - x; sum = y; } } // namespace exprtk::rtl::details template <typename T> class all_true : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); all_true() : exprtk::igeneric_function<T>("V|VTT") /* Overloads: 0. V - vector 1. 
VTT - vector, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t vec(parameters[0]); std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 1, 2, 0) ) return std::numeric_limits<T>::quiet_NaN(); for (std::size_t i = r0; i <= r1; ++i) { if (vec[i] == T(0)) { return T(0); } } return T(1); } }; template <typename T> class all_false : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); all_false() : exprtk::igeneric_function<T>("V|VTT") /* Overloads: 0. V - vector 1. VTT - vector, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t vec(parameters[0]); std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 1, 2, 0) ) return std::numeric_limits<T>::quiet_NaN(); for (std::size_t i = r0; i <= r1; ++i) { if (vec[i] != T(0)) { return T(0); } } return T(1); } }; template <typename T> class any_true : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); any_true() : exprtk::igeneric_function<T>("V|VTT") /* Overloads: 0. V - vector 1. 
VTT - vector, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t vec(parameters[0]); std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 1, 2, 0) ) return std::numeric_limits<T>::quiet_NaN(); for (std::size_t i = r0; i <= r1; ++i) { if (vec[i] != T(0)) { return T(1); } } return T(0); } }; template <typename T> class any_false : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); any_false() : exprtk::igeneric_function<T>("V|VTT") /* Overloads: 0. V - vector 1. VTT - vector, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t vec(parameters[0]); std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 1, 2, 0) ) return std::numeric_limits<T>::quiet_NaN(); for (std::size_t i = r0; i <= r1; ++i) { if (vec[i] == T(0)) { return T(1); } } return T(0); } }; template <typename T> class count : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); count() : exprtk::igeneric_function<T>("V|VTT") /* Overloads: 0. V - vector 1. 
VTT - vector, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t vec(parameters[0]); std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 1, 2, 0) ) return std::numeric_limits<T>::quiet_NaN(); std::size_t cnt = 0; for (std::size_t i = r0; i <= r1; ++i) { if (vec[i] != T(0)) ++cnt; } return T(cnt); } }; template <typename T> class copy : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); copy() : exprtk::igeneric_function<T>("VV|VTTVTT") /* Overloads: 0. VV - x(vector), y(vector) 1. VTTVTT - x(vector), xr0, xr1, y(vector), yr0, yr1, */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t x(parameters[0]); vector_t y(parameters[(0 == ps_index) ? 
1 : 3]); std::size_t xr0 = 0; std::size_t xr1 = x.size() - 1; std::size_t yr0 = 0; std::size_t yr1 = y.size() - 1; if (1 == ps_index) { if ( !helper::load_vector_range<T>::process(parameters, xr0, xr1, 1, 2, 0) || !helper::load_vector_range<T>::process(parameters, yr0, yr1, 4, 5, 3) ) return T(0); } const std::size_t n = std::min(xr1 - xr0 + 1, yr1 - yr0 + 1); std::copy(x.begin() + xr0, x.begin() + xr0 + n, y.begin() + yr0); return T(n); } }; template <typename T> class rol : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); rol() : exprtk::igeneric_function<T>("VT|VTTT") /* Overloads: 0. VT - vector, N 1. VTTT - vector, N, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { vector_t vec(parameters[0]); std::size_t n = 0; std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if (!scalar_t(parameters[1]).to_uint(n)) return T(0); if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0) ) return T(0); std::size_t dist = r1 - r0 + 1; std::size_t shift = n % dist; std::rotate(vec.begin() + r0, vec.begin() + r0 + shift, vec.begin() + r1 + 1); return T(1); } }; template <typename T> class ror : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); ror() : exprtk::igeneric_function<T>("VT|VTTT") /* Overloads: 0. VT - vector, N 1. 
VTTT - vector, N, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { vector_t vec(parameters[0]); std::size_t n = 0; std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if (!scalar_t(parameters[1]).to_uint(n)) return T(0); if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0) ) return T(0); std::size_t dist = r1 - r0 + 1; std::size_t shift = (dist - (n % dist)) % dist; std::rotate(vec.begin() + r0, vec.begin() + r0 + shift, vec.begin() + r1 + 1); return T(1); } }; template <typename T> class shift_left : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); shift_left() : exprtk::igeneric_function<T>("VT|VTTT") /* Overloads: 0. VT - vector, N 1. 
VTTT - vector, N, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { vector_t vec(parameters[0]); std::size_t n = 0; std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if (!scalar_t(parameters[1]).to_uint(n)) return T(0); if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0) ) return T(0); std::size_t dist = r1 - r0 + 1; if (n > dist) return T(0); std::rotate(vec.begin() + r0, vec.begin() + r0 + n, vec.begin() + r1 + 1); for (std::size_t i = r1 - n + 1; i <= r1; ++i) { vec[i] = T(0); } return T(1); } }; template <typename T> class shift_right : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); shift_right() : exprtk::igeneric_function<T>("VT|VTTT") /* Overloads: 0. VT - vector, N 1. 
VTTT - vector, N, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { vector_t vec(parameters[0]); std::size_t n = 0; std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if (!scalar_t(parameters[1]).to_uint(n)) return T(0); if ( (1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0) ) return T(0); std::size_t dist = r1 - r0 + 1; if (n > dist) return T(0); std::size_t shift = (dist - (n % dist)) % dist; std::rotate(vec.begin() + r0, vec.begin() + r0 + shift, vec.begin() + r1 + 1); for (std::size_t i = r0; i < r0 + n; ++i) { vec[i] = T(0); } return T(1); } }; template <typename T> class sort : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::string_view string_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); sort() : exprtk::igeneric_function<T>("V|VTT|VS|VSTT") /* Overloads: 0. V - vector 1. VTT - vector, r0, r1 2. VS - vector, string 3. 
VSTT - vector, string, r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { vector_t vec(parameters[0]); std::size_t r0 = 0; std::size_t r1 = vec.size() - 1; if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 1, 2, 0)) return T(0); if ((3 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0)) return T(0); bool ascending = true; if ((2 == ps_index) || (3 == ps_index)) { if (exprtk::details::imatch(to_str(string_t(parameters[1])),"ascending")) ascending = true; else if (exprtk::details::imatch(to_str(string_t(parameters[1])),"descending")) ascending = false; else return T(0); } if (ascending) std::sort(vec.begin() + r0, vec.begin() + r1 + 1, std::less<T> ()); else std::sort(vec.begin() + r0, vec.begin() + r1 + 1, std::greater<T>()); return T(1); } }; template <typename T> class nthelement : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); nthelement() : exprtk::igeneric_function<T>("VT|VTTT") /* Overloads: 0. VT - vector, nth-element 1. 
VTTT - vector, nth-element, r0, r1 */
   {}

   inline T operator() (const std::size_t& ps_index, parameter_list_t parameters)
   {
      vector_t vec(parameters[0]);

      std::size_t n  = 0;
      std::size_t r0 = 0;
      std::size_t r1 = vec.size() - 1;

      // n is the 0-based position (relative to r0) to place in sorted order.
      if (!scalar_t(parameters[1]).to_uint(n))
         return T(0);

      // NOTE(review): an invalid range yields NaN here, while the to_uint
      // failure above yields 0 - inconsistent but preserved as-is.
      if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0))
         return std::numeric_limits<T>::quiet_NaN();

      // Partially orders [r0,r1] so the element at r0 + n is the one a full
      // sort would put there (std::nth_element semantics).
      std::nth_element(vec.begin() + r0, vec.begin() + r0 + n , vec.begin() + r1 + 1);

      return T(1);
   }
};

// iota(v, inc [, base][, r0, r1]): fills the vector range with the
// arithmetic sequence base, base + inc, base + 2*inc, ...
template <typename T>
class iota : public exprtk::igeneric_function<T>
{
public:

   typedef typename exprtk::igeneric_function<T> igfun_t;
   typedef typename igfun_t::parameter_list_t parameter_list_t;
   typedef typename igfun_t::generic_type generic_type;
   typedef typename generic_type::scalar_view scalar_t;
   typedef typename generic_type::vector_view vector_t;

   using exprtk::igeneric_function<T>::operator();

   iota()
   : exprtk::igeneric_function<T>("VT|VTT|VTTT|VTTTT")
   /* Overloads:
      0. VT    - vector, increment
      1. VTT   - vector, increment, base
      2. VTTT  - vector, increment, r0, r1
      3. VTTTT - vector, increment, base, r0, r1
   */
   {}

   inline T operator() (const std::size_t& ps_index, parameter_list_t parameters)
   {
      vector_t vec(parameters[0]);
      T increment = scalar_t(parameters[1])();
      // Overloads 1 and 3 carry an explicit base value; otherwise base is 0.
      T base = ((1 == ps_index) || (3 == ps_index)) ? scalar_t(parameters[2])() : T(0);

      std::size_t r0 = 0;
      std::size_t r1 = vec.size() - 1;

      if ((2 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0))
         return std::numeric_limits<T>::quiet_NaN();
      else if ((3 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 3, 4, 0))
         return std::numeric_limits<T>::quiet_NaN();
      else
      {
         // j counts steps from r0 so the sequence restarts at 'base' even
         // when r0 > 0.
         long long j = 0;

         for (std::size_t i = r0; i <= r1; ++i, ++j)
         {
            vec[i] = base + (increment * j);
         }
      }

      return T(1);
   }
};

// sumk(v[,r0,r1]): Kahan-compensated sum of the elements in the range;
// NaN on an invalid range.
template <typename T>
class sumk : public exprtk::igeneric_function<T>
{
public:

   typedef typename exprtk::igeneric_function<T> igfun_t;
   typedef typename igfun_t::parameter_list_t parameter_list_t;
   typedef typename igfun_t::generic_type generic_type;
   typedef typename generic_type::vector_view vector_t;

   using exprtk::igeneric_function<T>::operator();

   sumk()
   : exprtk::igeneric_function<T>("V|VTT")
   /* Overloads:
      0. V   - vector
      1. VTT - vector, r0, r1
   */
   {}

   inline T operator() (const std::size_t& ps_index, parameter_list_t parameters)
   {
      const vector_t vec(parameters[0]);

      std::size_t r0 = 0;
      std::size_t r1 = vec.size() - 1;

      if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 1, 2, 0))
         return std::numeric_limits<T>::quiet_NaN();

      T result = T(0);
      T error  = T(0);

      // Compensated accumulation keeps the float round-off bounded.
      for (std::size_t i = r0; i <= r1; ++i)
      {
         details::kahan_sum(result, error, vec[i]);
      }

      return result;
   }
};

// axpy: y <- ax + y over exprtk vector views.
template <typename T>
class axpy : public exprtk::igeneric_function<T>
{
public:

   typedef typename exprtk::igeneric_function<T> igfun_t;
   typedef typename igfun_t::parameter_list_t parameter_list_t;
   typedef typename igfun_t::generic_type generic_type;
   typedef typename generic_type::scalar_view scalar_t;
   typedef typename generic_type::vector_view vector_t;

   using exprtk::igeneric_function<T>::operator();

   axpy()
   : exprtk::igeneric_function<T>("TVV|TVVTT")
   /* y <- ax + y Overloads: 0. TVV - a, x(vector), y(vector) 1.
TVVTT - a, x(vector), y(vector), r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t x(parameters[1]); vector_t y(parameters[2]); std::size_t r0 = 0; std::size_t r1 = std::min(x.size(),y.size()) - 1; if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 3, 4, 1)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(y, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); T a = scalar_t(parameters[0])(); for (std::size_t i = r0; i <= r1; ++i) { y[i] = (a * x[i]) + y[i]; } return T(1); } }; template <typename T> class axpby : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); axpby() : exprtk::igeneric_function<T>("TVTV|TVTVTT") /* y <- ax + by Overloads: 0. TVTV - a, x(vector), b, y(vector) 1. 
TVTVTT - a, x(vector), b, y(vector), r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t x(parameters[1]); vector_t y(parameters[3]); std::size_t r0 = 0; std::size_t r1 = std::min(x.size(),y.size()) - 1; if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 4, 5, 1)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(y, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); const T a = scalar_t(parameters[0])(); const T b = scalar_t(parameters[2])(); for (std::size_t i = r0; i <= r1; ++i) { y[i] = (a * x[i]) + (b * y[i]); } return T(1); } }; template <typename T> class axpyz : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); axpyz() : exprtk::igeneric_function<T>("TVVV|TVVVTT") /* z <- ax + y Overloads: 0. TVVV - a, x(vector), y(vector), z(vector) 1. 
TVVVTT - a, x(vector), y(vector), z(vector), r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t x(parameters[1]); const vector_t y(parameters[2]); vector_t z(parameters[3]); std::size_t r0 = 0; std::size_t r1 = std::min(x.size(),y.size()) - 1; if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 3, 4, 1)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(y, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(z, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); T a = scalar_t(parameters[0])(); for (std::size_t i = r0; i <= r1; ++i) { z[i] = (a * x[i]) + y[i]; } return T(1); } }; template <typename T> class axpbyz : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); axpbyz() : exprtk::igeneric_function<T>("TVTVV|TVTVVTT") /* z <- ax + by Overloads: 0. TVTVV - a, x(vector), b, y(vector), z(vector) 1. 
TVTVVTT - a, x(vector), b, y(vector), z(vector), r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t x(parameters[1]); const vector_t y(parameters[3]); vector_t z(parameters[4]); std::size_t r0 = 0; std::size_t r1 = std::min(x.size(),y.size()) - 1; if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 4, 5, 1)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(y, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(z, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); const T a = scalar_t(parameters[0])(); const T b = scalar_t(parameters[2])(); for (std::size_t i = r0; i <= r1; ++i) { z[i] = (a * x[i]) + (b * y[i]); } return T(1); } }; template <typename T> class axpbz : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); axpbz() : exprtk::igeneric_function<T>("TVTV|TVTVTT") /* z <- ax + b Overloads: 0. TVTV - a, x(vector), b, z(vector) 1. 
TVTVTT - a, x(vector), b, z(vector), r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t x(parameters[1]); vector_t z(parameters[3]); std::size_t r0 = 0; std::size_t r1 = x.size() - 1; if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 4, 5, 1)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(z, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); const T a = scalar_t(parameters[0])(); const T b = scalar_t(parameters[2])(); for (std::size_t i = r0; i <= r1; ++i) { z[i] = (a * x[i]) + b; } return T(1); } }; template <typename T> class dot : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); dot() : exprtk::igeneric_function<T>("VV|VVTT") /* Overloads: 0. VV - x(vector), y(vector) 1. 
VVTT - x(vector), y(vector), r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t x(parameters[0]); const vector_t y(parameters[1]); std::size_t r0 = 0; std::size_t r1 = std::min(x.size(),y.size()) - 1; if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(y, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); T result = T(0); for (std::size_t i = r0; i <= r1; ++i) { result += (x[i] * y[i]); } return result; } }; template <typename T> class dotk : public exprtk::igeneric_function<T> { public: typedef typename exprtk::igeneric_function<T> igfun_t; typedef typename igfun_t::parameter_list_t parameter_list_t; typedef typename igfun_t::generic_type generic_type; typedef typename generic_type::scalar_view scalar_t; typedef typename generic_type::vector_view vector_t; using exprtk::igeneric_function<T>::operator(); dotk() : exprtk::igeneric_function<T>("VV|VVTT") /* Overloads: 0. VV - x(vector), y(vector) 1. 
VVTT - x(vector), y(vector), r0, r1 */ {} inline T operator() (const std::size_t& ps_index, parameter_list_t parameters) { const vector_t x(parameters[0]); const vector_t y(parameters[1]); std::size_t r0 = 0; std::size_t r1 = std::min(x.size(),y.size()) - 1; if ((1 == ps_index) && !helper::load_vector_range<T>::process(parameters, r0, r1, 2, 3, 0)) return std::numeric_limits<T>::quiet_NaN(); else if (helper::invalid_range(y, r0, r1)) return std::numeric_limits<T>::quiet_NaN(); T result = T(0); T error = T(0); for (std::size_t i = r0; i <= r1; ++i) { details::kahan_sum(result, error, (x[i] * y[i])); } return result; } }; template <typename T> struct package { all_true <T> at; all_false <T> af; any_true <T> nt; any_false <T> nf; count <T> c; copy <T> cp; rol <T> rl; ror <T> rr; shift_left <T> sl; shift_right<T> sr; sort <T> st; nthelement <T> ne; iota <T> ia; sumk <T> sk; axpy <T> b1_axpy; axpby <T> b1_axpby; axpyz <T> b1_axpyz; axpbyz <T> b1_axpbyz; axpbz <T> b1_axpbz; dot <T> dt; dotk <T> dtk; bool register_package(exprtk::symbol_table<T>& symtab) { #define exprtk_register_function(FunctionName,FunctionType) \ if (!symtab.add_function(FunctionName,FunctionType)) \ { \ exprtk_debug(( \ "exprtk::rtl::vecops::register_package - Failed to add function: %s\n", \ FunctionName)); \ return false; \ } \ exprtk_register_function("all_true" ,at) exprtk_register_function("all_false" ,af) exprtk_register_function("any_true" ,nt) exprtk_register_function("any_false" ,nf) exprtk_register_function("count" , c) exprtk_register_function("copy" , cp) exprtk_register_function("rotate_left" ,rl) exprtk_register_function("rol" ,rl) exprtk_register_function("rotate_right" ,rr) exprtk_register_function("ror" ,rr) exprtk_register_function("shftl" ,sl) exprtk_register_function("shftr" ,sr) exprtk_register_function("sort" ,st) exprtk_register_function("nth_element" ,ne) exprtk_register_function("iota" ,ia) exprtk_register_function("sumk" ,sk) exprtk_register_function("axpy" ,b1_axpy) 
exprtk_register_function("axpby" ,b1_axpby) exprtk_register_function("axpyz" ,b1_axpyz) exprtk_register_function("axpbyz",b1_axpbyz) exprtk_register_function("axpbz" ,b1_axpbz) exprtk_register_function("dot" ,dt) exprtk_register_function("dotk" ,dtk) #undef exprtk_register_function return true; } }; } // namespace exprtk::rtl::vecops } // namespace exprtk::rtl } // namespace exprtk #endif namespace exprtk { namespace information { static const char* library = "Mathematical Expression Toolkit"; static const char* version = "2.718281828459045235360287471352662497757247093699" "95957496696762772407663035354759457138217852516642"; static const char* date = "20180913"; static inline std::string data() { static const std::string info_str = std::string(library) + std::string(" v") + std::string(version) + std::string(" (") + date + std::string(")"); return info_str; } } // namespace information #ifdef exprtk_debug #undef exprtk_debug #endif #ifdef exprtk_error_location #undef exprtk_error_location #endif #ifdef exprtk_disable_fallthrough_begin #undef exprtk_disable_fallthrough_begin #endif #ifdef exprtk_disable_fallthrough_end #undef exprtk_disable_fallthrough_end #endif } // namespace exprtk #endif ================================================ FILE: C/common/include/file_utils.h ================================================ /* * Fledge utilities functions for handling files and directories * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Ray Verhoeff */ #pragma once #include <string> int copyFile(const char *to, const char *from); void createDirectory(const std::string &directoryName); int removeDirectory(const char *path); ================================================ FILE: C/common/include/filter_pipeline.h ================================================ #ifndef _FILTER_PIPELINE_H #define _FILTER_PIPELINE_H /* * Fledge filter pipeline class. 
* * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <plugin.h> #include <plugin_manager.h> #include <config_category.h> #include <management_client.h> #include <plugin_data.h> #include <reading_set.h> #include <filter_plugin.h> #include <service_handler.h> #include <pipeline_element.h> typedef void (*filterReadingSetFn)(OUTPUT_HANDLE *outHandle, READINGSET* readings); /** * The FilterPipeline class is used to represent a pipeline of filters * applicable to a task/service. Methods are provided to load filters, * setup filtering pipeline and for pipeline/filters cleanup. */ class FilterPipeline { public: FilterPipeline(ManagementClient* mgtClient, StorageClient& storage, std::string serviceName); ~FilterPipeline(); PipelineElement *getFirstFilterPlugin() { return (m_filters.begin() == m_filters.end()) ? NULL : *(m_filters.begin()); }; unsigned int getFilterCount() { return m_filters.size(); } void configChange(const std::string&, const std::string&); // Cleanup the loaded filters void cleanupFilters(const std::string& categoryName); // Load filters as specified in the configuration bool loadFilters(const std::string& categoryName); // Setup the filter pipeline bool setupFiltersPipeline(void *passToOnwardFilter, void *useFilteredData, void *ingest); // Check FilterPipeline is ready for data ingest bool isReady() { return m_ready; }; bool hasChanged(const std::string pipeline) const { return m_pipeline != pipeline; } bool isShuttingDown() { return m_shutdown; }; void setShuttingDown() { m_shutdown = true; } void execute(); void awaitCompletion(); void startBranch(); void completeBranch(); // The filter pipeline debugger entry points bool attachDebugger(); void detachDebugger(); void setDebuggerBuffer(unsigned int size); std::string getDebuggerBuffer(); std::string getDebuggerBuffer(const std::string& name); bool replayDebugger(); private: PLUGIN_HANDLE loadFilterPlugin(const std::string& 
filterName); void loadPipeline(const rapidjson::Value& filters, std::vector<PipelineElement *>& pipeline); bool attachDebugger(const std::vector<PipelineElement *>& pipeline); void detachDebugger(const std::vector<PipelineElement *>& pipeline); void setDebuggerBuffer(const std::vector<PipelineElement *>& pipeline, unsigned int size); std::string getDebuggerBuffer(const std::vector<PipelineElement *>& pipeline); std::string readingsToJSON(std::vector<std::shared_ptr<Reading>> readings); protected: ManagementClient* mgtClient; StorageClient& storage; std::string serviceName; std::vector<PipelineElement *> m_filters; // Elements in the "trunk" pipeline std::map<std::string, PipelineElement *> m_filterCategories; std::string m_pipeline; bool m_ready; bool m_shutdown; ServiceHandler *m_serviceHandler; int m_activeBranches; std::mutex m_actives; std::condition_variable m_branchActivations; }; #endif ================================================ FILE: C/common/include/filter_plugin.h ================================================ #ifndef _FILTER_PLUGIN_H #define _FILTER_PLUGIN_H /* * Fledge filter plugin class. 
* * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <plugin.h> #include <plugin_manager.h> #include <config_category.h> #include <management_client.h> #include <plugin_data.h> #include <reading_set.h> // This is a C++ ReadingSet class instance passed through typedef ReadingSet READINGSET; // Data handle passed to function pointer typedef void OUTPUT_HANDLE; // Function pointer called by "plugin_ingest" plugin method typedef void (*OUTPUT_STREAM)(OUTPUT_HANDLE *, READINGSET *); // FilterPlugin class class FilterPlugin : public Plugin { public: FilterPlugin(const std::string& name, PLUGIN_HANDLE handle); ~FilterPlugin(); const std::string getName() const { return m_name; }; PLUGIN_HANDLE init(const ConfigCategory& config, OUTPUT_HANDLE* outHandle, OUTPUT_STREAM outputFunc); void shutdown(); void ingest(READINGSET *); bool persistData() { return info->options & SP_PERSIST_DATA; }; void startData(const std::string& pluginData); std::string shutdownSaveData(); void start(); void reconfigure(const std::string&); private: PLUGIN_HANDLE (*pluginInit)(const ConfigCategory* config, OUTPUT_HANDLE* outHandle, OUTPUT_STREAM output); void (*pluginShutdownPtr)(PLUGIN_HANDLE); void (*pluginReconfigurePtr)(PLUGIN_HANDLE, const std::string&); void (*pluginIngestPtr)(PLUGIN_HANDLE, READINGSET *); std::string (*pluginShutdownDataPtr)(const PLUGIN_HANDLE); void (*pluginStartDataPtr)(PLUGIN_HANDLE, const std::string& pluginData); void (*pluginStartPtr)(PLUGIN_HANDLE); public: // Persist plugin data PluginData* m_plugin_data; private: std::string m_name; PLUGIN_HANDLE m_instance; }; #endif ================================================ FILE: C/common/include/form_data.h ================================================ #ifndef _FORM_DATA_H #define _FORM_DATA_H /* * Fledge utilities functions for handling HTTP form data upload * with multipart data * * Copyright (c) 2022 Dianomic Systems * * Released under the 
Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <logger.h> #include <server_http.hpp> #define CR '\r' #define LF '\n' /** * This class represents a parsed HTTP form data uploaded * to SimpleWeb::Server<SimpleWeb::HTTP * * FormData::FieldValue holds the field value as buffer start, size * and filename if data comes form a file upload * * FormData holds the input buffer, size and boundary multipart data * * Public methods fetch value of a given field name * and save file to filesystem */ class FormData { public: class FieldValue { public: FieldValue() { size = 0; start = NULL; }; const uint8_t* start; size_t size; std::string filename; }; public: FormData(std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Request> request); void getUploadedData(const std::string& field, FieldValue& data); void getUploadedFile(const std::string& field, FieldValue& data); bool saveFile(FieldValue& b, const std::string& fileName); private: uint8_t* skipSeparator(uint8_t *b); uint8_t* skipDoubleSeparator(uint8_t *b); uint8_t* getContentEnd(uint8_t *b); uint8_t* findDataFormField(uint8_t* buffer, const std::string& field); private: const uint8_t* m_buffer; // pointer to already allocated buffer data size_t m_size; // buffer size std::string m_boundary; // multipart boundary }; #endif ================================================ FILE: C/common/include/insert.h ================================================ #ifndef _INSERT_H #define _INSERT_H /* * Fledge storage client. 
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <sstream> #include <iostream> #include <vector> #include <resultset.h> #include "rapidjson/document.h" #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include "rapidjson/error/error.h" /** * Class that defines data to be inserted or updated in a column within the table */ class InsertValue { public: InsertValue(const std::string& column, const std::string& value) : m_column(column) { m_value.str = (char *)malloc(value.length() + 1); strncpy(m_value.str, value.c_str(), value.length() + 1); m_type = STRING_COLUMN; }; InsertValue(const std::string& column, const int value) : m_column(column) { m_value.ival = value; m_type = INT_COLUMN; }; InsertValue(const std::string& column, const long value) : m_column(column) { m_value.ival = value; m_type = INT_COLUMN; }; InsertValue(const std::string& column, const double value) : m_column(column) { m_value.fval = value; m_type = NUMBER_COLUMN; }; InsertValue(const std::string& column, const rapidjson::Value& value) : m_column(column) { rapidjson::StringBuffer sb; rapidjson::Writer<rapidjson::StringBuffer> writer(sb); value.Accept(writer); std::string s = sb.GetString(); m_value.str = (char *)malloc(s.length() + 1); strncpy(m_value.str, s.c_str(), s.length() + 1); m_type = JSON_COLUMN; }; // Insert a NULL value for the given column InsertValue(const std::string& column) : m_column(column) { m_type = NULL_COLUMN; m_value.str = NULL; } InsertValue(const InsertValue& rhs) : m_column(rhs.m_column) { m_type = rhs.m_type; switch (rhs.m_type) { case INT_COLUMN: m_value.ival = rhs.m_value.ival; break; case NUMBER_COLUMN: m_value.fval = rhs.m_value.fval; break; case STRING_COLUMN: m_value.str = strdup(rhs.m_value.str); break; case JSON_COLUMN: // Internally stored a a string m_value.str = strdup(rhs.m_value.str); break; case NULL_COLUMN: m_value.str = NULL; break; case BOOL_COLUMN: // 
TODO break; } } ~InsertValue() { if (m_type == STRING_COLUMN || m_type == JSON_COLUMN) { free(m_value.str); } }; const std::string toJSON() const { std::ostringstream json; json << "\"" << m_column << "\" : "; switch (m_type) { case JSON_COLUMN: json << m_value.str; break; case BOOL_COLUMN: json << m_value.ival; break; case INT_COLUMN: json << m_value.ival; break; case NUMBER_COLUMN: json << m_value.fval; break; case STRING_COLUMN: json << "\"" << m_value.str << "\""; break; case NULL_COLUMN: // JSON output for NULL value json << "null"; break; } return json.str(); } private: InsertValue& operator=(InsertValue const& rhs); const std::string m_column; ColumnType m_type; union { char *str; long ival; double fval; } m_value; }; class InsertValues : public std::vector<InsertValue> { public: const std::string toJSON() const { std::ostringstream json; json << "{ "; for (std::vector<InsertValue>::const_iterator it = this->cbegin(); it != this->cend(); ++it) { json << it->toJSON(); if (it + 1 != this->cend()) json << ", "; else json << " "; } json << "}"; return json.str(); }; }; #endif ================================================ FILE: C/common/include/join.h ================================================ #ifndef _JOIN_H #define _JOIN_H /* * Fledge storage client. 
* * Copyright (c) 2022 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> class Query; /** * Join clause representation */ class Join { public: Join(const std::string& table, const std::string& on, Query *query) : m_table(table), m_column(on), m_on(on), m_query(query) { }; Join(const std::string& table, const std::string& column, const std::string& on, Query *query) : m_table(table), m_column(column), m_on(on), m_query(query) { }; ~Join(); const std::string toJSON() const; private: Join(const Join&); Join& operator=(Join const&); const std::string m_table; const std::string m_column; const std::string m_on; Query *m_query; }; #endif ================================================ FILE: C/common/include/json_properties.h ================================================ #ifndef _JSON_PROPERTIES_H #define _JSON_PROPERTIES_H /* * Fledge storage client. * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <sstream> #include <iostream> #include <vector> class JSONProperty { public: JSONProperty(const std::string& column, std::vector<std::string> path, const std::string& value) : m_column(column), m_value(value) { for (std::vector<std::string>::const_iterator it = path.cbegin(); it != path.cend(); ++it) m_path.push_back(*it); } const std::string toJSON() const { std::ostringstream json; json << "{ \"column\" : \"" << m_column << "\","; json << " \"path\" : ["; for (std::vector<std::string>::const_iterator it = m_path.cbegin(); it != m_path.cend(); ++it) { json << "\"" << *it << "\""; if ((it + 1) != m_path.cend()) json << ","; } json << "],"; json << "\"value\" : \"" << m_value << "\" }"; return json.str(); } private: const std::string m_column; const std::string m_value; std::vector<std::string> m_path; }; /** * Class that defines JSON properties for update */ class JSONProperties : public std::vector<JSONProperty> { public: const 
std::string toJSON() const { std::ostringstream json; json << "\"json_properties\" : [ "; for (std::vector<JSONProperty>::const_iterator it = this->cbegin(); it != this->cend(); ++it) { json << it->toJSON(); if (it + 1 != this->cend()) json << ", "; else json << " "; } json << "]"; return json.str(); }; }; #endif ================================================ FILE: C/common/include/json_provider.h ================================================ #ifndef _JSONPROVIDER_H #define _JSONPROVIDER_H /* * Fledge storage service. * * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> class JSONProvider { public: virtual void asJSON(std::string &) const = 0; }; #endif ================================================ FILE: C/common/include/json_utils.h ================================================ #ifndef _JSON_UTILS_H #define _JSON_UTILS_H /* * Fledge utilities functions for handling JSON document * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ bool JSONStringToVectorString(std::vector<std::string>& vectorString, const std::string& JSONString, const std::string& Key); std::string JSONescape(const std::string& subject); std::string JSONunescape(const std::string& subject); #endif ================================================ FILE: C/common/include/logger.h ================================================ #ifndef _LOGGER_H #define _LOGGER_H /* * Fledge storage service. 
* * Copyright (c) 2017-2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <string> #include <functional> #include <map> #include <mutex> #include <queue> #include <thread> #include <condition_variable> #include <atomic> #include <sys/socket.h> #include <arpa/inet.h> #define PRINT_FUNC Logger::getLogger()->info("%s:%d", __FUNCTION__, __LINE__); /** * Fledge Logger class used to log to syslog * * At startup this class should be constructed * using the standard constructor. To log a message * call debug, info, warn etc. using the instance * of the class. * * To obtain that singleton instance call the static * method getLogger. * * It is generally unsafe to delete the logger class * as it may be called asynchronouly from multiple * threads and single handlers. The destructor has * hence been made private to prevent the destruction * of the class. */ class Logger { public: enum class LogLevel { ERROR, WARNING, INFO, DEBUG, FATAL }; Logger(const std::string& application); ~Logger(); static Logger *getLogger(); void debug(const std::string& msg, ...); void printLongString(const std::string&, LogLevel = LogLevel::DEBUG); void info(const std::string& msg, ...); void warn(const std::string& msg, ...); void error(const std::string& msg, ...); void fatal(const std::string& msg, ...); void setMinLevel(const std::string& level); std::string& getMinLevel() { return levelString; } // LogInterceptor callback function signature typedef void (*LogInterceptor)(LogLevel, const std::string&, void*); // Register an interceptor bool registerInterceptor(LogLevel level, LogInterceptor callback, void* userData); // Unregister an interceptor bool unregisterInterceptor(LogLevel level, LogInterceptor callback); private: std::string *format(const std::string& msg, va_list ap); static Logger *instance; std::string levelString; int m_level; struct InterceptorData { LogInterceptor callback; void* userData; }; std::multimap<LogLevel, 
InterceptorData> m_interceptors; std::mutex m_interceptorMapMutex; struct LogTask { LogLevel level; std::string message; LogInterceptor callback; void* userData; }; std::queue<LogTask> m_taskQueue; std::mutex m_queueMutex; std::condition_variable m_condition; std::atomic<bool> m_runWorker; std::thread *m_workerThread; void log(int sysLogLvl, const char * lvlName, LogLevel appLogLvl, const std::string& msg, va_list args); void sendToUdpSink(const std::string& msg); void executeInterceptor(LogLevel level, const std::string& message); void workerThread(); int m_UdpSockFD = -1; struct sockaddr_in m_UdpServerAddr; bool m_SyslogUdpEnabled = false; std::string m_identifier; std::string m_hostname; }; #endif ================================================ FILE: C/common/include/management_client.h ================================================ #ifndef _MANAGEMENT_CLIENT_H #define _MANAGEMENT_CLIENT_H /* * Fledge storage service. * * Copyright (c) 2017-2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <client_http.hpp> #include <server_http.hpp> #include <config_category.h> #include <service_record.h> #include <logger.h> #include <string> #include <map> #include <vector> #include <rapidjson/document.h> #include <asset_tracking.h> #include <json_utils.h> #include <thread> #include <bearer_token.h> #include <acl.h> #include "utils.h" using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>; using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; using namespace rapidjson; class AssetTrackingTuple; class AssetTrackingTable; class StorageAssetTrackingTuple; /** * The management client class used by services and tasks to communicate * with the management API of the Fledge core microservice. * * The class encapsulates the management REST API and provides methods for accessing each * of those APIs. 
*/ class ManagementClient { public: ManagementClient(const std::string& hostname, const unsigned short port); ~ManagementClient(); bool registerService(const ServiceRecord& service); bool unregisterService(); bool restartService(); bool getService(ServiceRecord& service); bool getServices(std::vector<ServiceRecord *>& services); bool getServices(std::vector<ServiceRecord *>& services, const std::string& type); bool registerCategory(const std::string& categoryName); bool registerCategoryChild(const std::string& categoryName); bool unregisterCategory(const std::string& categoryName); ConfigCategories getCategories(); ConfigCategory getCategory(const std::string& categoryName); std::string setCategoryItemValue(const std::string& categoryName, const std::string& itemName, const std::string& itemValue); std::string addChildCategories(const std::string& parentCategory, const std::vector<std::string>& children); std::vector<AssetTrackingTuple*>& getAssetTrackingTuples(const std::string serviceName = ""); std::vector<StorageAssetTrackingTuple*>& getStorageAssetTrackingTuples(const std::string serviceName); StorageAssetTrackingTuple* getStorageAssetTrackingTuple(const std::string& serviceName, const std::string& assetName, const std::string& event, const std::string & dp, const unsigned int& c); bool addAssetTrackingTuple(const std::string& service, const std::string& plugin, const std::string& asset, const std::string& event); bool addStorageAssetTrackingTuple(const std::string& service, const std::string& plugin, const std::string& asset, const std::string& event, const bool& deprecated = false, const std::string& datapoints = "", const int& count = 0); ConfigCategories getChildCategories(const std::string& categoryName); HttpClient *getHttpClient(); bool addAuditEntry(const std::string& serviceName, const std::string& severity, const std::string& details); std::string& getRegistrationBearerToken() { std::lock_guard<std::mutex> guard(m_bearer_token_mtx); return 
m_bearer_token; }; void setNewBearerToken(const std::string& bearerToken) { std::lock_guard<std::mutex> guard(m_bearer_token_mtx); m_bearer_token = bearerToken; }; bool verifyBearerToken(BearerToken& token); bool verifyAccessBearerToken(BearerToken& bToken); bool verifyAccessBearerToken(std::shared_ptr<HttpServer::Request> request); bool refreshBearerToken(const std::string& currentToken, std::string& newToken); std::string& getBearerToken() { return m_bearer_token; }; bool addProxy(const std::string& serviceName, const std::string& operation, const std::string& publicEnpoint, const std::string& privateEndpoint); bool addProxy(const std::string& serviceName, const std::map<std::string, std::vector<std::pair<std::string, std::string> > >& endpoints); bool deleteProxy(const std::string& serviceName); const std::string getUrlbase() { return m_urlbase.str(); } ACL getACL(const std::string& aclName); AssetTrackingTuple* getAssetTrackingTuple(const std::string& serviceName, const std::string& assetName, const std::string& event); int validateDatapoints(std::string dp1, std::string dp2); AssetTrackingTable *getDeprecatedAssetTrackingTuples(); std::string getAlertByKey(const std::string& key); bool raiseAlert(const std::string& key, const std::string& message, const std::string& urgency="normal"); bool clearAlert(const std::string& key); private: std::ostringstream m_urlbase; std::map<std::thread::id, HttpClient *> m_client_map; HttpClient *m_client; std::string *m_uuid; Logger *m_logger; std::map<std::string, std::string> m_categories; // Bearer token returned by service registration // if the service startup token has been passed in registration payload std::string m_bearer_token; // Map of received and verified access bearer tokens from other microservices std::map<std::string, BearerToken> m_received_tokens; // m_received_tokens lock std::mutex m_mtx_rTokens; // m_client_map lock std::mutex m_mtx_client_map; // Get and set bearer token mutex std::mutex 
m_bearer_token_mtx; public: // member template must be here and not in .cpp file template<class T> bool addCategory(const T& t, bool keepOriginalItems = false) { try { std::string blockedCharacter = {}; if (!isValidIdentifier(t.getName(), blockedCharacter)) { m_logger->error("The category name %s contains %s invalid character(s).", blockedCharacter.c_str(), t.getName().c_str()); return false; } std::string url = "/fledge/service/category"; // Build the JSON payload std::ostringstream payload; payload << "{ \"key\" : \"" << JSONescape(t.getName()); payload << "\", \"description\" : \"" << JSONescape(t.getDescription()); if (! t.getDisplayName().empty() ) { payload << "\", \"display_name\" : \"" << JSONescape(t.getDisplayName()); } payload << "\", \"value\" : " << t.itemsToJSON(); /** * Note: * At the time being the keep_original_items is added into payload * and configuration manager in the Fledge handles it. * * In the near future keep_original_items will be passed * as URL modifier, i.e: 'URL?keep_original_items=true' */ if (keepOriginalItems) { url += "?keep_original_items=true"; } // Terminate JSON string payload << " }"; auto res = this->getHttpClient()->request("POST", url.c_str(), payload.str()); Document doc; std::string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { m_logger->error("Failed to parse result of adding a category: %s\n", response.c_str()); return false; } else if (doc.HasMember("message")) { m_logger->error("Failed to add configuration category: %s.", doc["message"].GetString()); return false; } else { return true; } } catch (const SimpleWeb::system_error &e) { m_logger->error("Add config category failed %s.", e.what()); } return false; }; }; #endif ================================================ FILE: C/common/include/pipeline_debugger.h ================================================ #ifndef _PIPELINE_DEBUGGER_H #define _PIPELINE_DEBUGGER_H /* * Fledge filter pipeline debugger. 
* * Copyright (c) 2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <reading_set.h> #include <reading.h> #include <reading_circularbuffer.h> #include <mutex> #include <vector> #include <memory> /** * The debugger class for elements in a pipeline */ class PipelineDebugger { public: PipelineDebugger(); ~PipelineDebugger(); typedef enum debuggerActions { NoAction, Block } DebuggerActions; DebuggerActions process(ReadingSet *readingSet); void setBuffer(unsigned int size); void clearBuffer(); std::vector<std::shared_ptr<Reading>> fetchBuffer(); private: ReadingCircularBuffer *m_buffer; std::mutex m_bufferMutex; }; #endif ================================================ FILE: C/common/include/pipeline_element.h ================================================ #ifndef _PIPELINE_ELEMENT_H #define _PIPELINE_ELEMENT_H /* * Fledge filter pipeline elements. * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <config_category.h> #include <management_client.h> #include <plugin.h> #include <plugin_manager.h> #include <plugin_data.h> #include <reading_set.h> #include <filter_plugin.h> #include <service_handler.h> #include <config_handler.h> #include <pipeline_debugger.h> class FilterPipeline; /** * The base pipeline element class */ class PipelineElement { public: PipelineElement() : m_next(NULL), m_storage(NULL), m_debugger(NULL) {}; virtual ~PipelineElement() {}; void setNext(PipelineElement *next) { m_next = next; }; PipelineElement *getNext() { return m_next; }; void setService(const std::string& serviceName) { m_serviceName = serviceName; }; void setStorage(StorageClient *storage) { m_storage = storage; }; bool attachDebugger(); void detachDebugger(); void setDebuggerBuffer(unsigned int size); std::vector<std::shared_ptr<Reading>> getDebuggerBuffer(); static void ingest(void *handle, READINGSET *readings) { ((PipelineElement 
*)handle)->ingest(readings); }; virtual bool setupConfiguration(ManagementClient * /* mgtClient */, std::vector<std::string>& /* children */) { return false; }; virtual bool isFilter() { return false; }; virtual bool isBranch() { return false; }; virtual void ingest(READINGSET *readingSet) = 0; virtual bool setup(ManagementClient *mgmt, void *ingest, std::map<std::string, PipelineElement*>& categories) = 0; virtual bool init(OUTPUT_HANDLE* outHandle, OUTPUT_STREAM output) = 0; virtual void shutdown(ServiceHandler *serviceHandler, ConfigHandler *configHandler) = 0; virtual void reconfigure(const std::string& /* newConfig */) { }; virtual std::string getName() = 0; virtual bool isReady() = 0; protected: std::string m_serviceName; PipelineElement *m_next; StorageClient *m_storage; PipelineDebugger *m_debugger; }; /** * A pipeline element the runs a filter plugin */ class PipelineFilter : public PipelineElement { public: PipelineFilter(const std::string& name, const ConfigCategory& filterDetails); ~PipelineFilter(); bool setupConfiguration(ManagementClient *mgtClient, std::vector<std::string>& children); void ingest(READINGSET *readingSet) { if (m_debugger) { PipelineDebugger::DebuggerActions action = m_debugger->process(readingSet); switch (action) { case PipelineDebugger::Block: delete readingSet; return; case PipelineDebugger::NoAction: break; } } if (m_plugin) { m_plugin->ingest(readingSet); } else { Logger::getLogger()->error("Pipeline filter %s has no plugin associated with it.", m_name.c_str()); } }; bool setup(ManagementClient *mgmt, void *ingest, std::map<std::string, PipelineElement*>& categories); bool init(OUTPUT_HANDLE* outHandle, OUTPUT_STREAM output); void shutdown(ServiceHandler *serviceHandler, ConfigHandler *configHandler); void reconfigure(const std::string& newConfig); bool isFilter() { return true; }; std::string getCategoryName() { return m_categoryName; }; bool persistData() { return m_plugin->persistData(); }; void setPluginData(PluginData 
*data) { m_plugin->m_plugin_data = data; }; std::string getPluginData() { return m_plugin->m_plugin_data->loadStoredData(m_serviceName + m_name); }; void setServiceName(const std::string& name) { m_serviceName = name; }; std::string getName() { return m_name; }; bool isReady() { return true; }; private: PLUGIN_HANDLE loadFilterPlugin(const std::string& filterName); private: std::string m_name; // The name of the filter instance std::string m_categoryName; std::string m_pluginName; PLUGIN_HANDLE m_handle; FilterPlugin *m_plugin; std::string m_serviceName; ConfigCategory m_updatedCfg; }; /** * A pipeline element that represents a branch in the pipeline */ class PipelineBranch : public PipelineElement { public: PipelineBranch(FilterPipeline *parent); ~PipelineBranch(); void ingest(READINGSET *readingSet); std::string getName() { return "Branch"; }; bool setupConfiguration(ManagementClient *mgtClient, std::vector<std::string>& children); bool setup(ManagementClient *mgmt, void *ingest, std::map<std::string, PipelineElement*>& categories); bool init(OUTPUT_HANDLE* outHandle, OUTPUT_STREAM output); void shutdown(ServiceHandler *serviceHandler, ConfigHandler *configHandler); bool isReady(); bool isBranch() { return true; }; std::vector<PipelineElement *>& getBranchElements() { return m_branch; }; void setFunctions(void *onward, void *use, void *ingest) { m_passOnward = onward; m_useData = use; m_ingest = ingest; }; private: static void branchHandler(void *instance); void handler(); private: std::vector<PipelineElement *> m_branch; std::thread *m_thread; std::queue<READINGSET *> m_queue; std::mutex m_mutex; std::condition_variable m_cv; void *m_passOnward; void *m_useData; void *m_ingest; bool m_shutdownCalled; FilterPipeline *m_pipeline; }; /** * A pipeline element that writes to a storage service or buffer */ class PipelineWriter : public PipelineElement { public: PipelineWriter(); std::string getName() { return "Writer"; }; void ingest(READINGSET *readingSet); bool 
setup(ManagementClient *mgmt, void *ingest, std::map<std::string, PipelineElement*>& categories); bool init(OUTPUT_HANDLE* outHandle, OUTPUT_STREAM output); void shutdown(ServiceHandler *serviceHandler, ConfigHandler *configHandler); bool isReady(); private: OUTPUT_STREAM m_useData; void *m_ingest; }; #endif ================================================ FILE: C/common/include/plugin_data.h ================================================ #ifndef _PLUGIN_DATA_H #define _PLUGIN_DATA_H /* * Fledge persist plugin data class. * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <storage_client.h> class PluginData { public: PluginData(StorageClient* client); ~PluginData() {}; // Load data std::string loadStoredData(const std::string& key); // Store data bool persistPluginData(const std::string& key, const std::string& data, const std::string& service_name); private: StorageClient* m_storage; bool m_dataLoaded; }; #endif ================================================ FILE: C/common/include/process.h ================================================ #ifndef _PROCESS_H #define _PROCESS_H /* * Fledge process base class * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <storage_client.h> #include <management_client.h> #include <audit_logger.h> #include <string.h> /** * Fledge process base class */ class FledgeProcess { public: FledgeProcess(int argc, char** argv); virtual ~FledgeProcess(); StorageClient* getStorageClient() const; ManagementClient* getManagementClient() const; Logger *getLogger() const; std::string getName() const { return m_name; }; time_t getStartTime() const { return m_stime; }; protected: std::string getArgValue(const std::string& name) const; bool m_dryRun; private: const time_t m_stime; // Start time const int m_argc; const char** m_arg_vals; // Fledge core management service details std::string m_name; 
int m_core_mngt_port; std::string m_core_mngt_host; ManagementClient* m_client; StorageClient* m_storage; Logger* m_logger; AuditLogger* m_auditLogger; }; #endif ================================================ FILE: C/common/include/purge_result.h ================================================ #ifndef _PURGE_RESULT_H #define _PURGE_RESULT_H /* * Fledge storage client. * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <string.h> #include <sstream> #include <iostream> #include <reading.h> #include <rapidjson/document.h> #include <vector> /** */ class PurgeResult { public: PurgeResult() : m_removed(0), m_unsentPurged(0), m_unsentRetained(0), m_remaining(0) {}; PurgeResult(const std::string& json); unsigned long getRemoved() const { return m_removed; }; unsigned long getUnsentPurged() const { return m_unsentPurged; }; unsigned long getUnsentRetained() const { return m_unsentRetained; }; unsigned long getRemaining() const { return m_remaining; }; private: unsigned long m_removed; unsigned long m_unsentPurged; unsigned long m_unsentRetained; unsigned long m_remaining; }; #endif ================================================ FILE: C/common/include/pyruntime.h ================================================ #ifndef _PYRUNTIME_H #define _PYRUNTIME_H /* * Fledge Python Runtime. 
 *
 * Copyright (c) 2021 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <Python.h>

/**
 * Singleton wrapper around the embedded Python interpreter.
 *
 * The constructor, destructor, copy constructor and assignment operator
 * are all private; the only way to obtain an instance is via the static
 * getPythonRuntime() accessor.
 */
class PythonRuntime {
	public:
		// Return the singleton instance, presumably creating it on
		// first use -- TODO confirm against the implementation
		static PythonRuntime	*getPythonRuntime();
		// True once the singleton instance has been created
		static bool		initialised() { return m_instance != NULL; };
		// Shut the runtime down
		static void		shutdown();
		// Execute a fragment of Python code
		void			execute(const std::string& python);
		// Call a named Python function; fmt/... follow the
		// Py_BuildValue-style variadic convention
		PyObject		*call(const std::string& name, const std::string& fmt, ...);
		// As above, but resolve the name within the given module object
		PyObject		*call(PyObject *module, const std::string& name, const std::string& fmt, ...);
		// Import a Python module and return the module object
		PyObject		*importModule(const std::string& name);
	private:
		PythonRuntime();
		~PythonRuntime();
		PythonRuntime(const PythonRuntime& rhs);
		PythonRuntime&		operator=(const PythonRuntime& rhs);
		// Log the current Python exception, tagged with the given name
		void			logException(const std::string& name);
		// The one and only instance of the runtime
		static PythonRuntime	*m_instance;
};
#endif


================================================
FILE: C/common/include/pythonconfigcategory.h
================================================
#ifndef _PYTHONCONFIGCATEGORY_H
#define _PYTHONCONFIGCATEGORY_H
/*
 * Fledge Python Configuration Category
 *
 * Copyright (c) 2021 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <config_category.h>
#include <Python.h>

/**
 * A wrapper class for a ConfigCategory to convert to and from
 * Python objects.
 */
class PythonConfigCategory : public ConfigCategory {
	public:
		// Construct a ConfigCategory from a Python object
		PythonConfigCategory(PyObject *pyConfig);
		// Convert this category to a Python object
		PyObject	*toPython();
	private:
		// Convert a single category item to a Python object
		PyObject	*convertItem(CategoryItem *);
};
#endif


================================================
FILE: C/common/include/pythonreading.h
================================================
#ifndef _PYTHONREADING_H
#define _PYTHONREADING_H
/*
 * Fledge Python Reading
 *
 * Copyright (c) 2021 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <reading.h>
#include <Python.h>

/**
 * A wrapper class for a Reading to convert to and from
 * Python objects.
 */
class PythonReading : public Reading {
	public:
		// Construct a Reading from a Python object
		PythonReading(PyObject *pyReading);
		~PythonReading() {};
		// Convert this reading to a Python object; the flags
		// presumably control key renaming and bytes/str handling of
		// string data -- confirm against the implementation
		PyObject 	*toPython(bool changeKeys = false, bool bytesString = false);
		// Return a description of the current Python error
		static std::string	errorMessage();
		// True if the given Python object is an array type
		static bool	isArray(PyObject *);
		// Guard so the NumPy C API is imported only once per process
		static bool 	doneNumPyImport;
	private:
		// Convert a single datapoint to a Python object
		PyObject	*convertDatapoint(Datapoint *dp, bool bytesString = false);
		// Build a DatapointValue from a Python object
		DatapointValue *getDatapointValue(PyObject *object);
		// Repair quoting in a string representation
		void		fixQuoting(std::string& str);
		// Perform the NumPy C API import
		int 		InitNumPy();
};
#endif


================================================
FILE: C/common/include/pythonreadingset.h
================================================
#ifndef _PYTHON_READING_SET_H_
#define _PYTHON_READING_SET_H_
/*
 * Fledge Python Reading Set
 *
 * Copyright (c) 2021 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <reading_set.h>
#include <Python.h>

/**
 * A wrapper class for the ReadingSet class that allows conversion
 * to and from Python objects.
 */
class PythonReadingSet : public ReadingSet {
	public:
		// Construct a ReadingSet from a Python object
		PythonReadingSet(PyObject *pySet);
		~PythonReadingSet() {};
		// Convert the reading set to a Python object
		PyObject *toPython(bool changeKeys = false);
	private:
		// Populate attributes of a new Reading from the Python data
		void setReadingAttr(Reading* newReading, PyObject *readingList, bool fillIfMissing);
};
#endif


================================================
FILE: C/common/include/query.h
================================================
#ifndef _QUERY_H
#define _QUERY_H
/*
 * Fledge storage client.
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <where.h> #include <aggregate.h> #include <sort.h> #include <join.h> #include <timebucket.h> #include <returns.h> #include <string> #include <vector> /** * Storage layer query container */ class Query { public: Query(Where *where); Query(Aggregate *aggreate, Where *where); Query(Timebucket *timebucket, Where *where); Query(Timebucket *timebucket, Where *where, unsigned int limit); Query(Returns *returns); Query(std::vector<Returns *> returns); Query(std::vector<Returns *> returns, Where *where); Query(std::vector<Returns *> returns, Where *where, unsigned int limit); ~Query(); void aggregate(Aggregate *aggegate); void group(const std::string& column); void sort(Sort *sort); void limit(unsigned int limit); void timebucket(Timebucket*); void returns(Returns *); void returns(std::vector<Returns *>); void distinct(); void join(Join *join); const std::string toJSON() const; private: Query(const Query&); // Disable copy of query Query& operator=(Query const&); Where *m_where; std::vector<Aggregate *> m_aggregates; std::string m_group; std::vector<Sort *> m_sort; unsigned int m_limit; Timebucket* m_timebucket; std::vector<Returns *> m_returns; bool m_distinct; Join *m_join; }; #endif ================================================ FILE: C/common/include/reading.h ================================================ #ifndef _READING_H #define _READING_H /* * Fledge storage client. 
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <datapoint.h> #include <string> #include <ctime> #include <vector> #include <sys/time.h> #include <rapidjson/document.h> #define DEFAULT_DATE_TIME_FORMAT "%Y-%m-%d %H:%M:%S" #define COMBINED_DATE_STANDARD_FORMAT "%Y-%m-%dT%H:%M:%S" #define ISO8601_DATE_TIME_FORMAT "%Y-%m-%d %H:%M:%S +0000" #define DATE_TIME_BUFFER_LEN 52 /** * An asset reading represented as a class. * * Each asset reading may have multiple datapoints to represent the * multiple values that maybe held within a complex asset. * * NB The timestamp data held for both the system timestamp and the * user timestamp are always held internally as UTC times */ class Reading { public: Reading(const std::string& asset, Datapoint *value); Reading(const std::string& asset, std::vector<Datapoint *> values); Reading(const std::string& asset, std::vector<Datapoint *> values, const std::string& ts); Reading(const std::string& asset, const std::string& datapoints); Reading(const Reading& orig); virtual ~Reading(); void addDatapoint(Datapoint *value); Datapoint *removeDatapoint(const std::string& name); Datapoint *getDatapoint(const std::string& name) const; std::string toJSON(bool minimal = false) const; std::string getDatapointsJSON() const; // Return AssetName const std::string& getAssetName() const { return m_asset; }; // Set AssetName void setAssetName(std::string assetName) { m_asset = assetName; }; unsigned int getDatapointCount() { return m_values.size(); }; void removeAllDatapoints(); // Return Reading datapoints const std::vector<Datapoint *> getReadingData() const { return m_values; }; // Return refrerence to Reading datapoints std::vector<Datapoint *>& getReadingData() { return m_values; }; bool hasId() const { return m_has_id; }; unsigned long getId() const { return m_id; }; unsigned long getTimestamp() const { return (unsigned long)m_timestamp.tv_sec; }; unsigned long 
getUserTimestamp() const { return (unsigned long)m_userTimestamp.tv_sec; }; void setId(unsigned long id) { m_id = id; }; void setTimestamp(unsigned long ts) { m_timestamp.tv_sec = (time_t)ts; m_timestamp.tv_usec = 0; }; void setTimestamp(struct timeval tm) { m_timestamp = tm; }; void setTimestamp(const std::string& timestamp); void getTimestamp(struct timeval *tm) { *tm = m_timestamp; }; void setUserTimestamp(unsigned long uTs) { m_userTimestamp.tv_sec = (time_t)uTs; m_userTimestamp.tv_usec = 0; }; void setUserTimestamp(struct timeval tm) { m_userTimestamp = tm; }; void setUserTimestamp(const std::string& timestamp); void getUserTimestamp(struct timeval *tm) { *tm = m_userTimestamp; }; typedef enum dateTimeFormat { FMT_DEFAULT, FMT_STANDARD, FMT_ISO8601, FMT_ISO8601MS } readingTimeFormat; void getFormattedDateTimeStr(const time_t *tv_sec, char *date_time, readingTimeFormat dateFormat) const; // Return Reading asset time - ts time const std::string getAssetDateTime(readingTimeFormat datetimeFmt = FMT_DEFAULT, bool addMs = true) const; // Return Reading asset time - user_ts time const std::string getAssetDateUserTime(readingTimeFormat datetimeFmt = FMT_DEFAULT, bool addMs = true) const; std::string substitute(const std::string& str); protected: Reading() {}; Reading& operator=(Reading const&); void stringToTimestamp(const std::string& timestamp, struct timeval *ts); const std::string escape(const std::string& str) const; std::vector<Datapoint *> *JSONtoDatapoints(const rapidjson::Value& json); unsigned long m_id; bool m_has_id; std::string m_asset; struct timeval m_timestamp; struct timeval m_userTimestamp; std::vector<Datapoint *> m_values; // Supported date time formats for 'm_timestamp' static std::vector<std::string> m_dateTypes; private: // Internal class used for macro substitution class Macro { public: Macro(const std::string& dpname, std::string::size_type s, const std::string& defValue) : start(s), name(dpname), def(defValue) { }; Macro(const std::string& 
dpname, std::string::size_type s) : start(s), name(dpname) { }; // Start of variable to substitute std::string::size_type start; // Name of variable to substitute std::string name; // Default value to substitute std::string def; }; void collectMacroInfo(const std::string& str, std::vector<Macro>& macros); }; #endif ================================================ FILE: C/common/include/reading_circularbuffer.h ================================================ #ifndef _READING_CIRCULARBUFFER_H #define _READING_CIRCULARBUFFER_H /* * Fledge Reading Circular Buffer. * * Copyright (c) 2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <reading.h> #include <mutex> #include <vector> #include <memory> /** * A circular buffer of readings. The buffer size is set in the constructor, * when it fills the oldest reading will be overwritten by new readings being * appended. * * The user can extract the current state at any point in historic order. */ class ReadingCircularBuffer { public: ReadingCircularBuffer(unsigned int size); ~ReadingCircularBuffer(); void insert(Reading *); void insert(const std::vector<Reading *>& readings); void insert(const std::vector<Reading *> *readings); int extract(std::vector<std::shared_ptr<Reading>>& vec); private: unsigned int m_size; std::mutex m_mutex; std::vector<std::shared_ptr<Reading>> m_readings; unsigned int m_insert; unsigned int m_entries; }; #endif ================================================ FILE: C/common/include/reading_set.h ================================================ #ifndef _READINGSET_H #define _READINGSET_H /* * Fledge storage client. 
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <string>
#include <string.h>
#include <sstream>
#include <iostream>
#include <reading.h>
#include <rapidjson/document.h>
#include <vector>

/**
 * Reading set class
 *
 * A specialised container for a set of readings that allows
 * creation from a JSON document.
 */
class ReadingSet {
	public:
		ReadingSet();
		// Construct a reading set from a JSON document
		ReadingSet(const std::string& json);
		// Construct a reading set that takes a copy of the given readings
		ReadingSet(const std::vector<Reading *>* readings);
		virtual ~ReadingSet();

		// Number of readings currently held in the set
		unsigned long			getCount() const { return m_readings.size(); };
		const Reading			*operator[] (const unsigned int idx) { return m_readings[idx]; };
		// Return the const reference of readings data
		const std::vector<Reading *>&	getAllReadings() const { return m_readings; };
		// Return the reference of readings
		std::vector<Reading *>*		getAllReadingsPtr() { return &m_readings; };
		// Remove readings from reading set and return reference to readings
		std::vector<Reading *>*		moveAllReadings();
		// Delete a reading from reading set and return pointer of deleted reading
		Reading*			removeReading(unsigned long id);
		// Return the reading id of the last data element
		unsigned long			getLastId() const { return m_last_id; };
		// Return the id of the reading at the given position
		unsigned long			getReadingId(uint32_t pos);
		// Append the contents of another set/vector to this set
		void				append(ReadingSet *);
		void				append(ReadingSet&);
		void				append(std::vector<Reading *> &);
		// Merge readings into this set
		void				merge(std::vector<Reading *> *readings);
		void				removeAll();
		void				clear();
		// Copy the readings of src into this set
		bool				copy(const ReadingSet& src);

	protected:
		// NOTE(review): m_count looks unused by the inline accessors,
		// which report m_readings.size() instead -- confirm whether any
		// out-of-line code still maintains it
		unsigned long			m_count;
		ReadingSet(const ReadingSet&);
		ReadingSet&			operator=(ReadingSet const &);
		std::vector<Reading *>		m_readings;
		// Id of the last Reading element
		unsigned long			m_last_id;
};

/**
 * JSONReading class
 *
 * A specialised reading class that allows creation from a JSON document
 */
class JSONReading : public Reading {
	public:
		JSONReading(const rapidjson::Value& json);
		~JSONReading() {};
		// Return the reading id
		unsigned long	getId() const { return m_id; };
	private:
		// Build a datapoint from the JSON value
		Datapoint	*datapoint(const std::string& name, const rapidjson::Value& json);
		// Escape occurrences of the pattern in the string
		void		escapeCharacter(std::string& stringToEvaluate, std::string pattern);
};

/**
 * Exception raised for errors in a reading set; carries a heap copy
 * of the message text, freed in the destructor.
 */
class ReadingSetException : public std::exception {
	public:
		ReadingSetException(const char *what)
		{
			m_what = strdup(what);
		};
		~ReadingSetException()
		{
			if (m_what)
				free(m_what);
		};
		virtual const char *what() const throw()
		{
			return m_what;
		};
	private:
		char *m_what;
};
#endif


================================================
FILE: C/common/include/reading_stream.h
================================================
#ifndef _READING_STREAM_H
#define _READING_STREAM_H
/*
 * Fledge storage reading stream protocol definitions.
 *
 * Copyright (c) 2019 Dianomic Systems Inc.
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
// Magic numbers used to tag the headers of the reading stream protocol
#define RDS_CONNECTION_MAGIC	0x344f4e4e
#define RDS_BLOCK_MAGIC		0x5244424b
#define RDS_READING_MAGIC	0x52444947
#define RDS_ACK_MAGIC		0x4241434b
#define RDS_NACK_MAGIC		0x4e41434b

// Header sent to establish a stream connection
typedef struct {
	uint32_t	magic;
	uint32_t	token;
} RDSConnectHeader;

// Header that precedes a block of readings
typedef struct {
	uint32_t	magic;
	uint32_t	blockNumber;
	uint32_t	count;
} RDSBlockHeader;

// Header that precedes an individual reading
typedef struct {
	uint32_t	magic;
	uint32_t	readingNo;
	uint32_t	assetLength;
	uint32_t	payloadLength;
} RDSReadingHeader;

// Acknowledgement (or negative acknowledgement) of a block
typedef struct {
	uint32_t	magic;
	uint32_t	block;
} RDSAcknowledge;

// On-the-wire representation of a reading; assetCode is a
// variable-length field, hence the [1] placeholder
typedef struct {
	uint32_t	assetCodeLength;
	uint32_t	payloadLength;
	struct timeval	userTs;
	char		assetCode[1];
} ReadingStream;
#endif


================================================
FILE: C/common/include/readingset_circularbuffer.h
================================================
#ifndef _READINGSETCIRCULARBUFFER_H
#define _READINGSETCIRCULARBUFFER_H
/*
 * Fledge ReadingSet Circular Buffer.
 *
 * Copyright (c) 2024 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Devki Nandan Ghildiyal
 */
#include <reading_set.h>
#include <mutex>
#include <vector>
#include <memory>

/**
 * Reading set circular buffer class
 *
 * Reading set circular buffer is a data structure to hold ReadingSet
 * passed to a plugin. The buffer is guarded by an internal mutex and
 * is neither copyable nor assignable.
 */
class ReadingSetCircularBuffer {
	public:
		// Create a buffer that holds at most maxBufferSize reading sets
		ReadingSetCircularBuffer(unsigned long maxBufferSize=10);
		~ReadingSetCircularBuffer();
		// Insert a reading set into the buffer
		void insert(ReadingSet*);
		void insert(ReadingSet&);
		// Extract buffered reading sets; by default a single element
		std::vector<std::shared_ptr<ReadingSet>> extract(bool isExtractSingleElement=true);
	private:
		// Protects m_circularBuffer and the read index
		std::mutex m_mutex;
		// Maximum number of reading sets held
		unsigned long m_maxBufferSize;
		// Index of the next element to be read
		unsigned long m_nextReadIndex;
		void appendReadingSet(const std::vector<Reading *>& readings);
		ReadingSetCircularBuffer (const ReadingSetCircularBuffer&) = delete;
		ReadingSetCircularBuffer& operator=(const ReadingSetCircularBuffer&) = delete;
		// The buffered reading sets
		std::vector<std::shared_ptr<ReadingSet>> m_circularBuffer;
};
#endif


================================================
FILE: C/common/include/resultset.h
================================================
#ifndef _RESULTSET_H
#define _RESULTSET_H
/*
 * Fledge storage client.
* * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <string.h> #include <sstream> #include <iostream> #include <vector> #include <rapidjson/document.h> typedef enum column_type { INT_COLUMN = 1, NUMBER_COLUMN, STRING_COLUMN, BOOL_COLUMN, JSON_COLUMN, NULL_COLUMN } ColumnType; /** * Result set */ class ResultSet { public: class ColumnValue { public: ColumnValue(const std::string& value) { m_value.str = (char *)malloc(value.length() + 1); strncpy(m_value.str, value.c_str(), value.length() + 1); m_type = STRING_COLUMN; }; ColumnValue(const int value) { m_value.ival = value; m_type = INT_COLUMN; }; ColumnValue(const long value) { m_value.ival = value; m_type = INT_COLUMN; }; ColumnValue(const double value) { m_value.fval = value; m_type = NUMBER_COLUMN; }; ColumnValue(const rapidjson::Value& value) { m_doc = new rapidjson::Document(); rapidjson::Document::AllocatorType& a = m_doc->GetAllocator(); m_value.json = new rapidjson::Value(value, a); m_type = JSON_COLUMN; }; ~ColumnValue() { if (m_type == STRING_COLUMN) free(m_value.str); else if (m_type == JSON_COLUMN) { delete m_doc; delete m_value.json; } }; ColumnType getType() { return m_type; }; long getInteger() const; double getNumber() const; char *getString() const; const rapidjson::Value *getJSON() const { return m_value.json; }; private: ColumnValue(const ColumnValue&); ColumnValue& operator=(ColumnValue const&); ColumnType m_type; union { char *str; long ival; double fval; rapidjson::Value *json; } m_value; rapidjson::Document *m_doc; }; class Row { public: Row(ResultSet *resultSet) : m_resultSet(resultSet) {}; ~Row() { for (auto it = m_values.cbegin(); it != m_values.cend(); it++) delete *it; } void append(ColumnValue *value) { m_values.push_back(value); }; ColumnType getType(unsigned int column); ColumnType getType(const std::string& name); ColumnValue *getColumn(unsigned int column) const; ColumnValue *getColumn(const 
std::string& name) const; ColumnValue *operator[] (unsigned long colNo) const { return m_values[colNo]; }; private: Row(const Row&); Row& operator=(Row const&); std::vector<ResultSet::ColumnValue *> m_values; const ResultSet *m_resultSet; }; typedef std::vector<Row *>::iterator RowIterator; ResultSet(const std::string& json); ~ResultSet(); unsigned int rowCount() const { return m_rowCount; }; unsigned int columnCount() const { return m_columns.size(); }; const std::string& columnName(unsigned int column) const; ColumnType columnType(unsigned int column) const; ColumnType columnType(const std::string& name) const; RowIterator firstRow(); RowIterator nextRow(RowIterator it); bool isLastRow(RowIterator it) const; bool hasNextRow(RowIterator it) const; unsigned int findColumn(const std::string& name) const; const Row * operator[] (unsigned long rowNo) { return m_rows[rowNo]; }; private: ResultSet(const ResultSet &); ResultSet& operator=(ResultSet const&); class Column { public: Column(const std::string& name, ColumnType type) : m_name(name), m_type(type) {}; const std::string& getName() { return m_name; }; ColumnType getType() { return m_type; }; private: const std::string m_name; ColumnType m_type; }; unsigned int m_rowCount; std::vector<ResultSet::Column *> m_columns; std::vector<ResultSet::Row *> m_rows; }; class ResultException : public std::exception { public: ResultException(const char *what) { m_what = strdup(what); }; ~ResultException() { if (m_what) free(m_what); }; virtual const char *what() const throw() { return m_what; }; private: char *m_what; }; class ResultNoSuchColumnException : public std::exception { public: virtual const char *what() const throw() { return "Column does not exist"; } }; class ResultNoMoreRowsException : public std::exception { public: virtual const char *what() const throw() { return "No more rows in the result set"; } }; class ResultIncorrectTypeException : public std::exception { public: virtual const char *what() const throw() { 
return "No more rows in the result set"; } }; #endif ================================================ FILE: C/common/include/returns.h ================================================ #ifndef _RETURNS_H #define _RETURNS_H /* * Fledge storage client. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <sstream> #include <iostream> /** * Control a returned column */ class Returns { public: Returns(const std::string& column) : m_column(column) {}; Returns(const std::string& column, const std::string& alias) : m_column(column), m_alias(alias) {}; Returns(const std::string& column, const std::string& alias, const std::string& format) : m_column(column), m_alias(alias), m_format(format) {}; ~Returns() {}; void format(const std::string format) { m_format = format; } void timezone(const std::string timezone) { m_timezone = timezone; } std::string toJSON() { std::ostringstream json; if ((! m_alias.empty()) || (! m_format.empty()) || (! m_timezone.empty())) { json << "{ "; json << "\"column\" : \"" << m_column << "\""; if (! m_alias.empty()) json << ", \"alias\" : \"" << m_alias << "\""; if (! m_format.empty()) json << ", \"format\" : \"" << m_format << "\""; if (! m_timezone.empty()) json << ", \"timezone\" : \"" << m_timezone << "\""; json << " }"; } else { json << "\"" << m_column << "\""; } return json.str(); } private: const std::string m_column; const std::string m_alias; std::string m_format; std::string m_timezone; }; #endif ================================================ FILE: C/common/include/service_record.h ================================================ #ifndef _SERVICE_RECORD_H #define _SERVICE_RECORD_H /* * Fledge storage service. 
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <json_provider.h>
#include <string>

/**
 * A record that describes a service: its name, type, protocol, address,
 * service port and management port, plus the startup token issued by the
 * core. Serialisable to JSON via the JSONProvider interface.
 */
class ServiceRecord : public JSONProvider {
	public:
		ServiceRecord(const std::string& name);
		ServiceRecord(const std::string& name,
			      const std::string& type);
		ServiceRecord(const std::string& name,
			      const std::string& type,
			      const std::string& protocol,
			      const std::string& address,
			      const unsigned short port,
			      const unsigned short managementPort,
			      const std::string& token = "");
		// Serialise this record to JSON
		void			asJSON(std::string &) const;
		const std::string&	getName() const
					{
						return m_name;
					}
		const std::string&	getType() const
					{
						return m_type;
					}
		void			setAddress(const std::string& address)
					{
						m_address = address;
					}
		void			setPort(const unsigned short port)
					{
						m_port = port;
					}
		void			setProtocol(const std::string& protocol)
					{
						m_protocol = protocol;
					}
		const std::string&	getProtocol() const
					{
						return m_protocol;
					}
		void			setManagementPort(const unsigned short managementPort)
					{
						m_managementPort = managementPort;
					}
		const std::string&	getAddress()
					{
						return m_address;
					}
		unsigned short		getPort()
					{
						return m_port;
					}
		// NOTE(review): equality deliberately ignores m_token; two
		// records with different startup tokens compare equal
		bool			operator==(const ServiceRecord& b) const
					{
						return m_name.compare(b.m_name) == 0 &&
							m_type.compare(b.m_type) == 0 &&
							m_protocol.compare(b.m_protocol) == 0 &&
							m_address.compare(b.m_address) == 0 &&
							m_port == b.m_port &&
							m_managementPort == b.m_managementPort;
					}
	private:
		std::string		m_name;
		std::string		m_type;
		std::string		m_protocol;
		std::string		m_address;
		unsigned short		m_port;
		unsigned short		m_managementPort;
		std::string		m_token; // token set by core server at service start
};
#endif


================================================
FILE: C/common/include/sort.h
================================================
#ifndef _SORT_H
#define _SORT_H
/*
 * Fledge storage client.
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <sstream> #include <iostream> /** * Sort clause in a selection of records */ class Sort { public: Sort(const std::string& column) : m_column(column), m_reverse(false) {}; Sort(const std::string& column, bool reverse) : m_column(column), m_reverse(reverse) {}; ~Sort() {}; std::string toJSON() { std::ostringstream json; json << "{ \"column\" : \"" << m_column << "\", "; json << "\"direction\" : \"" << (m_reverse ? "desc" : "asc") << "\" }"; return json.str(); } private: const std::string m_column; bool m_reverse; }; #endif ================================================ FILE: C/common/include/storage_client.h ================================================ #ifndef _STORAGE_CLIENT_H #define _STORAGE_CLIENT_H /* * Fledge storage client. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <client_http.hpp> #include <reading.h> #include <reading_set.h> #include <resultset.h> #include <purge_result.h> #include <query.h> #include <insert.h> #include <json_properties.h> #include <expression.h> #include <update_modifier.h> #include <logger.h> #include <string> #include <vector> #include <thread> using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>; #define STREAM_BLK_SIZE 100 // Readings to send per write call to a stream #define STREAM_THRESHOLD 25 // Switch to streamed mode above this number of readings per second // Backup values for repeated storage client exception messages #define SC_INITIAL_BACKOFF 100 #define SC_MAX_BACKOFF 1000 #define DEFAULT_SCHEMA "fledge" class ManagementClient; /** * Client for accessing the storage service */ class StorageClient { public: StorageClient(HttpClient *client); StorageClient(const std::string& hostname, const unsigned short port); ~StorageClient(); ResultSet *queryTable(const std::string& schema, const std::string& 
tablename, const Query& query); ResultSet *queryTable(const std::string& tablename, const Query& query); ReadingSet *queryTableToReadings(const std::string& tableName, const Query& query); int insertTable(const std::string& schema, const std::string& tableName, const InsertValues& values); int insertTable(const std::string& schema, const std::string& tableName, const std::vector<InsertValues>& values); int insertTable(const std::string& tableName, const std::vector<InsertValues>& values); int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& schema, const std::string& tableName, const JSONProperties& json, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, const JSONProperties& json, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& schema, const std::string& tableName, const ExpressionValues& values, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& schema, const std::string& tableName, std::vector<std::pair<ExpressionValues *, Where *>>& updates, const UpdateModifier *modifier = NULL); int updateTable(const std::string& schema, const std::string& tableName, const InsertValues& values, const ExpressionValues& expressoins, const Where& where, const UpdateModifier *modifier = NULL); int deleteTable(const std::string& schema, const std::string& tableName, const Query& query); int insertTable(const std::string& tableName, const InsertValues& values); int updateTable(const std::string& tableName, const InsertValues& values, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& tableName, const JSONProperties& json, const Where& where, const UpdateModifier *modifier = NULL); int 
updateTable(const std::string& tableName, const InsertValues& values, const JSONProperties& json, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& tableName, const ExpressionValues& values, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& tableName, std::vector<std::pair<ExpressionValues *, Where *>>& updates, const UpdateModifier *modifier = NULL); int updateTable(const std::string& tableName, const InsertValues& values, const ExpressionValues& expressions, const Where& where, const UpdateModifier *modifier = NULL); int updateTable(const std::string& schema, const std::string& tableName, std::vector<std::pair<InsertValue*, Where* > > &updates, const UpdateModifier *modifier); int updateTable(const std::string& tableName, std::vector<std::pair<InsertValue*, Where*> >& updates, const UpdateModifier *modifier = NULL); int deleteTable(const std::string& tableName, const Query& query); bool readingAppend(Reading& reading); bool readingAppend(const std::vector<Reading *> & readings); ResultSet *readingQuery(const Query& query); ReadingSet *readingQueryToReadings(const Query& query); ReadingSet *readingFetch(const unsigned long readingId, const unsigned long count); PurgeResult readingPurgeByAge(unsigned long age, unsigned long sent, bool purgeUnsent); PurgeResult readingPurgeBySize(unsigned long size, unsigned long sent, bool purgeUnsent); PurgeResult readingPurgeByAsset(const std::string& asset); bool registerAssetNotification(const std::string& assetName, const std::string& callbackUrl); bool unregisterAssetNotification(const std::string& assetName, const std::string& callbackUrl); bool registerTableNotification(const std::string& tableName, const std::string& key, std::vector<std::string> keyValues, const std::string& operation, const std::string& callbackUrl); bool unregisterTableNotification(const std::string& tableName, const std::string& key, std::vector<std::string> 
keyValues, const std::string& operation, const std::string& callbackUrl); void registerManagement(ManagementClient *mgmnt) { m_management = mgmnt; }; bool createSchema(const std::string&); bool deleteHttpClient(); private: void handleUnexpectedResponse(const char *operation, const std::string& table, const std::string& responseCode, const std::string& payload); void handleUnexpectedResponse(const char *operation, const std::string& responseCode, const std::string& payload); void handleException(const std::exception& ex, const char *operation, ...); HttpClient *getHttpClient(void); bool openStream(); bool streamReadings(const std::vector<Reading *> & readings); std::ostringstream m_urlbase; std::string m_host; std::map<std::thread::id, HttpClient *> m_client_map; std::map<std::thread::id, std::atomic<int>> m_seqnum_map; Logger *m_logger; pid_t m_pid; bool m_streaming; int m_stream; uint32_t m_readingBlock; std::string m_lastException; int m_exRepeat; int m_backoff; ManagementClient *m_management; }; #endif ================================================ FILE: C/common/include/string_utils.h ================================================ #ifndef _STRING_UTILS_H #define _STRING_UTILS_H /* * Fledge utilities functions for handling stringa * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli, Massimiliano Pinto */ #include <string> #include <sstream> #include <iomanip> void StringReplace(std::string& StringToManage, const std::string& StringToSearch, const std::string& StringReplacement); void StringReplaceAll(std::string& StringToManage, const std::string& StringToSearch, const std::string& StringReplacement); std::string StringSlashFix(const std::string& stringToFix); std::string evaluateParentPath(const std::string& path, char separator); std::string extractLastLevel(const std::string& path, char separator); void StringStripCRLF(std::string& StringToManage); std::string StringStripWhiteSpacesAll(const 
std::string& original); std::string StringStripWhiteSpacesExtra(const std::string& original); void StringStripQuotes(std::string& StringToManage); std::string urlEncode(const std::string& s); std::string urlDecode(const std::string& s); void StringEscapeQuotes(std::string& s); char *trim(char *str); std::string StringLTrim(const std::string& str); std::string StringRTrim(const std::string& str); std::string StringTrim(const std::string& str); bool IsRegex(const std::string &str); std::string StringAround(const std::string& str, unsigned int pos, unsigned int after = 30, unsigned int before = 10); void StringReplaceAllEx(std::string& StringToManage, const std::string& StringToSearch, const std::string& StringToChange); std::string escape(const std::string& str); #endif ================================================ FILE: C/common/include/timebucket.h ================================================ #ifndef _TIMEBUCKET_H #define _TIMEBUCKET_H /* * Fledge storage client. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <sstream> #include <iostream> /** * Timebucket clause in a selection of records */ class Timebucket { public: Timebucket(const std::string& column, unsigned int size, const std::string& format, const std::string& alias) : m_column(column), m_size(size), m_format(format), m_alias(alias) {}; Timebucket(const std::string& column, unsigned int size, const std::string& format) : m_column(column), m_size(size), m_format(format), m_alias(column) {}; ~Timebucket() {}; std::string toJSON() { std::ostringstream json; json << "{ \"timestamp\" : \"" << m_column << "\", "; json << "\"size\" : \"" << m_size << "\", "; json << "\"format\" : \"" << m_format << "\", "; json << "\"alias\" : \"" << m_alias << "\" }"; return json.str(); } private: const std::string m_column; unsigned int m_size; const std::string m_format; const std::string m_alias; }; #endif 
#ifndef _UPDATE_MODIFIER_H
#define _UPDATE_MODIFIER_H
/*
 * Fledge storage client.
 *
 * Copyright (c) 2022 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <string>

/**
 * Update modifier
 *
 * Wraps an opaque modifier string that is emitted verbatim, unquoted
 * and unescaped, into the JSON payload of an update request.
 */
class UpdateModifier {
	public:
		UpdateModifier(const std::string& modifier) :
				m_modifier(modifier)
		{
		};
		// Destructor is declared here and defined in the implementation file
		~UpdateModifier();
		// Return the raw modifier text; no quoting or escaping is applied
		const std::string	toJSON() const { return m_modifier; };
	private:
		UpdateModifier(const UpdateModifier&);			// Non-copyable
		UpdateModifier&		operator=(UpdateModifier const&);	// Non-assignable
		const std::string	m_modifier;
};
#endif

#ifndef _FLEDGE_UTILS_H
#define _FLEDGE_UTILS_H
/*
 * Fledge general utilities
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <string>
#include <algorithm>
#include <vector>
#include <cstdlib>	// getenv: previously relied on a transitive include

#define _FLEDGE_ROOT_PATH    "/usr/local/fledge"

// NOTE(review): a using-directive in a header leaks into every includer;
// kept here for backward compatibility with existing code.
using namespace std;

/**
 * Return Fledge root dir
 *
 * Return current value of FLEDGE_ROOT env var or
 * default path _FLEDGE_ROOT_PATH
 *
 * @return Return Fledge root dir
 */
static const string getRootDir()
{
	const char* rootDir = getenv("FLEDGE_ROOT");
	return (rootDir ? string(rootDir) : string(_FLEDGE_ROOT_PATH));
}

/**
 * Return Fledge data dir
 *
 * Return current value of FLEDGE_DATA env var or
 * default value: getRootDir + /data
 *
 * @return Return Fledge data dir
 */
static const string getDataDir()
{
	const char* dataDir = getenv("FLEDGE_DATA");
	return (dataDir ? string(dataDir) : string(getRootDir() + "/data"));
}

/**
 * @brief Constructs the path for the debug-trace subdirectory in the Fledge data directory.
 *
 * @return A string representing the path to the debug-trace directory.
 */
static std::string getDebugTracePath()
{
	return getDataDir() + "/logs/debug-trace";
}

/**
 * @brief Converts a string representation of a boolean value to a boolean type.
 *
 * This function takes a string input and checks if it represents a boolean value.
 * It recognizes "true", "1", and their case-insensitive variants as true.
 * Any other string will be interpreted as false.
 *
 * @param str The string to convert to a boolean. Can be "true", "false", "1", "0", etc.
 * @return true if the input string represents a true value; false otherwise.
 *
 * @note This function is case-insensitive and will convert the input string to lowercase
 *       before comparison.
 */
static bool stringToBool(const std::string& str)
{
	std::string lowerStr = str;
	std::transform(lowerStr.begin(), lowerStr.end(), lowerStr.begin(), ::tolower);
	return (lowerStr == "true" || lowerStr == "1");
}

/**
 * @brief Validates if a given string is a valid identifier.
 * A valid identifier is defined as a string that does not contain any disallowed characters.
 *
 * @param str The string to validate as an identifier.
 * @param blockedCharacter A reference to a string that will be set to the first
 *        disallowed character found in the input string, if any. Left unchanged
 *        when the string is valid or empty.
 * @return true if the string is a valid identifier; false otherwise.
 */
static bool isValidIdentifier(const std::string& str, std::string& blockedCharacter)
{
	// An empty string can never be an identifier
	if (str.empty())
		return false;

	// Check for disallowed characters
	static const std::vector<std::string> disallowed_characters = { "\\" };
	for (const auto& ch : disallowed_characters)
	{
		if (str.find(ch) != std::string::npos)
		{
			blockedCharacter = ch;
			return false;
		}
	}
	return true;
}
#endif

#ifndef _VALUE_H
#define _VALUE_H
/*
 * Fledge storage client.
 *
 * Copyright (c) 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <string>
#include <sstream>
#include <iostream>

/**
 * A value in an update statement
 *
 * Holds a column name and a typed value and renders the pair as a
 * JSON member, e.g. "column" : value.
 *
 * Note: the original implementation stored the value in a naked union
 * containing a std::string, which is ill-formed (a union member with a
 * non-trivial constructor cannot be initialised with the
 * "m_value.str(value)" member-initialiser syntax and requires manual
 * lifetime management). The string now lives in its own member and the
 * union holds only trivially-copyable numeric alternatives.
 */
class UpdateValue {
	public:
		enum UpdateType { StringType, IntType, DoubleType, JSONType };
		UpdateValue(const std::string& column, const std::string& value) :
			m_column(column), m_type(UpdateValue::StringType), m_sval(value)
		{
		};
		UpdateValue(const std::string& column, const int value) :
			m_column(column), m_type(UpdateValue::IntType)
		{
			m_num.ival = value;
		};
		UpdateValue(const std::string& column, const double value) :
			m_column(column), m_type(UpdateValue::DoubleType)
		{
			m_num.fval = value;
		};
		~UpdateValue() {};
		/**
		 * Render the column/value pair as a JSON member
		 *
		 * String values are quoted; JSON values are emitted verbatim.
		 * Note: the column name and string value are not escaped here.
		 *
		 * @return The JSON representation of this update value
		 */
		std::string	toJSON()
		{
			std::ostringstream json;

			json << "\"" << m_column << "\" : ";
			switch (m_type)
			{
			case UpdateValue::StringType:
				json << "\"" << m_sval << "\"";
				break;
			case UpdateValue::IntType:
				json << m_num.ival;
				break;
			case UpdateValue::DoubleType:
				json << m_num.fval;
				break;
			case UpdateValue::JSONType:
				json << m_sval;
				break;
			}
			return json.str();
		}
	private:
		const std::string	m_column;
		enum UpdateType		m_type;
		std::string		m_sval;		// Holds string and JSON values
		union {
			int		ival;
			double		fval;
		}			m_num;		// Holds numeric values
};
#endif
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <string> #include <vector> #include <stdexcept> typedef enum Conditional { Older, Newer, Equals, NotEquals, GreaterThan, LessThan, In, IsNull, NotNull } Condition; /** * Where clause in a selection of records */ class Where { public: Where(const std::string& column, const Condition condition, const std::string& value) : m_column(column), m_condition(condition), m_and(0), m_or(0) { if (condition != In) { m_value = value; } else { m_in.push_back(value); } }; Where(const std::string& column, const Condition condition, const std::string& value, Where *andCondition) : m_column(column), m_condition(condition), m_and(andCondition), m_or(0) { if (condition != In) { m_value = value; } else { m_in.push_back(value); } }; Where(const std::string& column, const Condition condition) : m_column(column), m_condition(condition), m_and(0), m_or(0) { if (condition != IsNull && condition != NotNull) { throw std::runtime_error("Missing value in where clause"); } }; Where(const std::string& column, const Condition condition, Where *andCondition) : m_column(column), m_condition(condition), m_and(andCondition), m_or(0) { if (condition != IsNull && condition != NotNull) { throw std::runtime_error("Missing value in where clause"); } }; ~Where(); void andWhere(Where *condition) { m_and = condition; }; void orWhere(Where *condition) { m_or = condition; }; void addIn(const std::string& value) { if (m_condition == In) { m_in.push_back(value); } }; const std::string toJSON() const; private: Where(const Where&); Where& operator=(Where const&); const std::string m_column; const Condition m_condition; std::string m_value; Where *m_and; Where *m_or; std::vector<std::string> m_in; }; #endif ================================================ FILE: C/common/join.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2022 
Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <join.h> #include <query.h> #include <sstream> #include <iostream> using namespace std; /** * Destructor fo rthe join clause */ Join::~Join() { delete m_query; } /** * Convert a join clause to its JSON representation * * @return string The JSON form of the join */ const string Join::toJSON() const { ostringstream json; bool first = true; json << " \"join\" : {"; json << "\"table\" : { \"name\" : \"" << m_table << "\", "; json << "\"column\" : \"" << m_column << "\" }, "; json << "\"on\" : \"" << m_on << "\", "; json << "\"query\" : " << m_query->toJSON(); json << " }"; return json.str(); } ================================================ FILE: C/common/json_utils.cpp ================================================ /* * Fledge utilities functions for handling JSON document * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ #include <iostream> #include <string> #include <vector> #include "json_utils.h" #include "rapidjson/document.h" using namespace std; using namespace rapidjson; /** * Processes a string containing an array in JSON format and loads a vector of string * * @param vectorString vector of string used by reference in which the JSON array will be loaded * @param JSONString string containing an array in JSON format * @param Key key of the JSON from which the array should be evaluated * */ bool JSONStringToVectorString(std::vector<std::string>& vectorString, const std::string& JSONString, const std::string& Key) { bool success = true; Document JSONdoc; JSONdoc.Parse(JSONString.c_str()); if (JSONdoc.HasParseError()) { success = false; } else if (!JSONdoc.HasMember(Key.c_str())) { success = false; } else if (!JSONdoc[Key.c_str()].IsArray()) { success = false; } if (success) { const Value &filterList = JSONdoc[Key.c_str()]; if (!filterList.Size()) { success = false; } else { for 
(Value::ConstValueIterator itr = filterList.Begin(); itr != filterList.End(); ++itr) { vectorString.emplace_back(itr->GetString()); } } } return success; } string JSONescape(const std::string& subject) { size_t pos = 0; string replace("\\\""); string escaped = subject; while ((pos = escaped.find("\"", pos)) != std::string::npos) { escaped.replace(pos, 1, replace); pos += replace.length(); } return escaped; } /** * Return unescaped version of a JSON string * * Routine removes \" inside the string * and leading and trailing " * * @param input Input string * @return Unescaped string */ std::string JSONunescape(const std::string& input) { std::string output; size_t inputSize = input.size(); output.reserve(inputSize); for (size_t i = 0; i < inputSize; ++i) { // skip leading or trailing " if ((i == 0 || i == inputSize -1) && input[i] == '"') { continue; } // \\\" -> \" if (input[i] == '\\' && i + 3 < inputSize && input[i + 1] == '\\' && input[i + 2] == '\\' && input[i + 3] == '"') { output.push_back('\\'); output.push_back('"'); i += 3; } // \\" -> \" // \" -> " else if (input[i] == '\\' && i + 1 < inputSize && input[i + 1] == '"') { output.push_back('"'); ++i; } else { output.push_back(input[i]); } } return output; } ================================================ FILE: C/common/logger.cpp ================================================ /* * Fledge storage service. 
* * Copyright (c) 2017-2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <logger.h> #include <stdio.h> #include <unistd.h> #include <syslog.h> #include <stdarg.h> #include <memory> #include <string.h> #include <sys/time.h> #include <sys/socket.h> #include <exception> #include <arpa/inet.h> #include <stdexcept> #include <algorithm> using namespace std; // uncomment line below to get uSec level timestamps // #define ADD_USEC_TS const char * DEFALUT_LOG_IP = "127.0.0.1"; const int DEFAULT_LOG_PORT = 5140; inline long getCurrTimeUsec() { struct timeval m_timestamp; gettimeofday(&m_timestamp, NULL); return m_timestamp.tv_usec; } /** * The singleton pointer */ Logger *Logger::instance = 0; /** * Constructor for the Logger class. * * @param application The application name */ Logger::Logger(const string& application) : m_runWorker(true), m_workerThread(NULL) { static char ident[80]; if (instance) { instance->error("Attempt to create second singleton instance, original application name %s, current attempt made by %s", ident, application.c_str()); throw runtime_error("Attempt to create secnd Logger instance"); } /* Prepend "Fledge " in all cases other than Fledge itself and Fledge Storage. 
*/ if (application.compare("Fledge") != 0 && application.compare("Fledge Storage") != 0) { snprintf(ident, sizeof(ident), "Fledge %s", application.c_str()); } else { strncpy(ident, application.c_str(), sizeof(ident)); } // Check if SYSLOG_UDP_ENABLED is set via environment variable const char* udpEnabledEnv = std::getenv("SYSLOG_UDP_ENABLED"); m_SyslogUdpEnabled = false; auto toLower = [](const std::string& s) { std::string out = s; std::transform(out.begin(), out.end(), out.begin(), [](unsigned char c) { return std::tolower(c); }); return out; }; if (udpEnabledEnv != nullptr && toLower(std::string(udpEnabledEnv)) == "true") { m_SyslogUdpEnabled = true; } if(m_SyslogUdpEnabled) { // Check LOG_IP and LOG_PORT from environment variables with default values const char* logIpEnv = std::getenv("LOG_IP"); const char* logPortEnv = std::getenv("LOG_PORT"); std::string logIp = logIpEnv ? logIpEnv : DEFALUT_LOG_IP; // Default to 127.0.0.1 int logPort = logPortEnv ? std::atoi(logPortEnv) : DEFAULT_LOG_PORT; // Default to port 5140 // Initialize the UDP socket m_UdpSockFD = socket(AF_INET, SOCK_DGRAM, 0); if (m_UdpSockFD >= 0) { memset(&m_UdpServerAddr, 0, sizeof(m_UdpServerAddr)); m_UdpServerAddr.sin_family = AF_INET; m_UdpServerAddr.sin_port = htons(logPort); // Use the port from LOG_PORT or default if (inet_pton(AF_INET, logIp.c_str(), &m_UdpServerAddr.sin_addr) <= 0) { throw std::runtime_error("Invalid LOG_IP address"); } } else { throw std::runtime_error("Failed to create UDP socket"); } if(m_SyslogUdpEnabled) { char hostname[256]; // Buffer to store the hostname // Retrieve the hostname (localhost name) if (gethostname(hostname, sizeof(hostname)) != 0) { // Fallback in case of failure to retrieve hostname strncpy(hostname, "localhost", sizeof(hostname) - 1); hostname[sizeof(hostname) - 1] = '\0'; } m_hostname = hostname; } } else { // Warning: these flags should be updated with caution because the `m_identifier` used in UDP when `m_SyslogUdpEnabled` is `true` may break 
openlog(ident, LOG_PID|LOG_CONS, LOG_USER); } instance = this; m_level = LOG_WARNING; m_identifier = ident; } /** * Destructor for the logger class. */ Logger::~Logger() { // Stop the getLogger() call returning a deleted instance if (instance == this) instance = NULL; else if (!instance) return; // Already destroyed m_runWorker = false; m_condition.notify_one(); if (m_workerThread && m_workerThread->joinable()) { m_workerThread->join(); delete m_workerThread; m_workerThread = NULL; } if(!m_SyslogUdpEnabled) { closelog(); } else { if (m_UdpSockFD >= 0) { close(m_UdpSockFD); m_UdpSockFD = -1; } } } /** * Send a message to the UDP sink if enabled * * @param msg The message to send */ void Logger::sendToUdpSink(const std::string& msg) { if (m_UdpSockFD >= 0) { sendto(m_UdpSockFD, msg.c_str(), msg.size(), 0, (struct sockaddr*)&m_UdpServerAddr, sizeof(m_UdpServerAddr)); } } /** * Return the singleton instance of the logger class. */ Logger *Logger::getLogger() { if (!instance) { // Any service should have already created the logger // for the service. If not then create the default logger // and clearly identify this. We should ideally avoid // the use of a default as this will not identify the // source of the log message. instance = new Logger("(default)"); } return instance; } /** * Set the minimum level of logging to write to syslog. 
* * @param level The minimum, inclusive, level of logging to write */ void Logger::setMinLevel(const string& level) { if (level.compare("info") == 0) { setlogmask(LOG_UPTO(LOG_INFO)); levelString = level; m_level = LOG_INFO; } else if (level.compare("warning") == 0) { setlogmask(LOG_UPTO(LOG_WARNING)); levelString = level; m_level = LOG_WARNING; } else if (level.compare("debug") == 0) { setlogmask(LOG_UPTO(LOG_DEBUG)); levelString = level; m_level = LOG_DEBUG; } else if (level.compare("error") == 0) { setlogmask(LOG_UPTO(LOG_ERR)); levelString = level; m_level = LOG_ERR; } else { error("Request to set unsupported log level %s", level.c_str()); } } /** * Register a callback function to be called when * a log message is written that matches the specification * given. * * Note: The callback functions are called on a separate thread. * This worker thread is only created when the first callback is * registered. * * @param level The level that must be matched * @param callback The funtion to be called * @param userData User date to pass to the callback function * @return bool Return true if the callback was registered */ bool Logger::registerInterceptor(LogLevel level, LogInterceptor callback, void* userData) { // Do not register the interceptor if callback function is null if (callback == nullptr) { return false; } std::lock_guard<std::mutex> lock(m_interceptorMapMutex); if (m_workerThread == NULL) { m_workerThread = new std::thread(&Logger::workerThread, this); } auto it = m_interceptors.emplace(level, InterceptorData{callback, userData}); if (it != m_interceptors.end()) { return true; } return false; } /** * Remove the registration of a previously registered callback * * @param level The matching log level for the callback * @param callback The callback to unregister * @return bool True if the callback was unregistered. 
 */
bool Logger::unregisterInterceptor(LogLevel level, LogInterceptor callback)
{
	// Serialise access to the interceptor map with register/execute calls
	std::lock_guard<mutex> lock(m_interceptorMapMutex);
	// Only interceptors registered for this exact level are candidates
	auto range = m_interceptors.equal_range(level);
	for (auto it = range.first; it != range.second; ++it)
	{
		if (it->second.callback == callback)
		{
			// Remove only the first matching registration for this level
			m_interceptors.erase(it);
			return true;
		}
	}
	// No registration of this callback at this level was found
	return false;
}

/**
 * Queue the execution of a callback when a log message is received
 * that matches a registered callback
 *
 * The callback is not run here; a task is pushed onto the queue for
 * the worker thread to execute.
 *
 * @param level	The log level
 * @param message	The log message
 */
void Logger::executeInterceptor(LogLevel level, const std::string& message)
{
	// Hold the map lock for the whole scan so registrations cannot
	// change while tasks are being queued
	std::lock_guard<mutex> lock(m_interceptorMapMutex);
	auto range = m_interceptors.equal_range(level);
	for (auto it = range.first; it != range.second; ++it)
	{
		// The queue lock is taken while the map lock is held; the
		// worker thread only ever takes the queue lock, so this
		// ordering cannot deadlock with it
		std::lock_guard<mutex> lock(m_queueMutex);
		m_taskQueue.push({level, message, it->second.callback, it->second.userData});
	}
	// Wake the worker thread to drain the queue
	m_condition.notify_one();
}

/**
 * The worker thread that processes intercepted log messages and
 * calls the callback function to handle them
 */
void Logger::workerThread()
{
	while (m_runWorker)
	{
		std::unique_lock<mutex> lock(m_queueMutex);
		// Sleep until a task is queued or shutdown is requested
		m_condition.wait(lock, [this] { return !m_taskQueue.empty() || !m_runWorker; });
		while (!m_taskQueue.empty())
		{
			if(!m_runWorker) //Exit immediately during shutdown
			{
				// NOTE(review): tasks still queued at shutdown are
				// dropped without being delivered
				return;
			}
			LogTask task = m_taskQueue.front();
			m_taskQueue.pop();
			// Release the queue lock while the callback runs so
			// logging threads are not blocked by a slow interceptor
			lock.unlock();
			if (task.callback)
			{
				task.callback(task.level, task.message, task.userData);
			}
			lock.lock();
		}
	}
}

/**
 * Log a message at the level debug
 *
 * @param msg	A printf format string
 * @param ...	The variable arguments required by the printf format
 */
void Logger::debug(const string& msg, ...)
{
	va_list args;
	va_start(args, msg);
	// Use the unified log function with the "DEBUG" level
	log(LOG_DEBUG, "DEBUG", LogLevel::DEBUG, msg, args);
	va_end(args);
}

/**
 * Log a long string across multiple syslog entries
 *
 * @param s	The string to log
 * @param level	level to log the string at
 */
void Logger::printLongString(const string& s, LogLevel level)
{
	// Keep each emitted entry below the syslog line length limit
	const int charsPerLine = 950;
	int len = s.size();
	const char *cstr = s.c_str();
	for (int i=0; i<(len+charsPerLine-1)/charsPerLine; i++)
	{
		// NOTE(review): the continuation-marker test "len - i > charsPerLine"
		// compares the character count against the loop index; it looks like
		// "len - i*charsPerLine > charsPerLine" (characters remaining) was
		// intended — confirm before changing
		switch (level)
		{
		case LogLevel::FATAL:
			this->fatal("%.*s%s", charsPerLine, cstr+i*charsPerLine, len - i > charsPerLine ? "..." : "");
			break;
		case LogLevel::ERROR:
			this->error("%.*s%s", charsPerLine, cstr+i*charsPerLine, len - i > charsPerLine ? "..." : "");
			break;
		case LogLevel::WARNING:
			this->warn("%.*s%s", charsPerLine, cstr+i*charsPerLine, len - i > charsPerLine ? "..." : "");
			break;
		case LogLevel::INFO:
			this->info("%.*s%s", charsPerLine, cstr+i*charsPerLine, len - i > charsPerLine ? "..." : "");
			break;
		case LogLevel::DEBUG:
		default:
			this->debug("%.*s%s", charsPerLine, cstr+i*charsPerLine, len - i > charsPerLine ? "..." : "");
			break;
		}
	}
}

/**
 * Log a message at the level info
 *
 * @param msg	A printf format string
 * @param ...	The variable arguments required by the printf format
 */
void Logger::info(const std::string& msg, ...)
{
	va_list args;
	va_start(args, msg);
	// Use the unified log function with the "INFO" level
	log(LOG_INFO, "INFO", LogLevel::INFO, msg, args);
	va_end(args);
}

/**
 * Log a message at the level warn
 *
 * @param msg	A printf format string
 * @param ...	The variable arguments required by the printf format
 */
void Logger::warn(const string& msg, ...)
{
	va_list args;
	va_start(args, msg);
	// Use the unified log function with the "WARNING" level
	log(LOG_WARNING, "WARNING", LogLevel::WARNING, msg, args);
	va_end(args);
}

/**
 * Log a message at the level error
 *
 * @param msg	A printf format string
 * @param ...
The variable arguments required by the printf format */ void Logger::error(const string& msg, ...) { va_list args; va_start(args, msg); // Use the unified log function with the "ERROR" level log(LOG_ERR, "ERROR", LogLevel::ERROR, msg, args); va_end(args); } /** * Log a message at the level fatal * * @param msg A printf format string * @param ... The variable arguments required by the printf format */ void Logger::fatal(const string& msg, ...) { va_list args; va_start(args, msg); // Use the unified log function with the "FATAL" level log(LOG_CRIT, "FATAL", LogLevel::FATAL, msg, args); va_end(args); } /** * Log a message at the specified level * * @param sysLogLvl The syslog level to use * @param lvlName The name of the log level * @param appLogLvl The application log level * @param msg A printf format string * @param ... The variable arguments required by the printf format */ void Logger::log(int sysLogLvl, const char * lvlName, LogLevel appLogLvl, const std::string& msg, va_list args) { // Check if the current log level allows messages if (m_level < sysLogLvl) { return; } constexpr size_t MAX_BUFFER_SIZE = 1024; // Maximum allowed log size char buffer[MAX_BUFFER_SIZE]; // Stack-allocated buffer for formatting int copied = 0; if(m_SyslogUdpEnabled) { // Add the identifier to the message in case udp copied = snprintf(buffer, sizeof(buffer), "%s %s[%d]: ", m_hostname.c_str(), m_identifier.c_str(), getpid()); } #ifdef ADD_USEC_TS copied += snprintf(buffer + copied, sizeof(buffer) - copied, "[.%06ld] %s: ", getCurrTimeUsec(), lvlName); #else copied += snprintf(buffer + copied, sizeof(buffer) - copied, "%s: ", lvlName); #endif // Format the log message using vsnprintf vsnprintf(buffer + copied, sizeof(buffer) - copied, msg.c_str(), args); if(m_SyslogUdpEnabled) { // Send the message to the UDP sink sendToUdpSink(buffer); } else { syslog(sysLogLvl, "%s", buffer); } // Execute interceptors if any are present if (!m_interceptors.empty()) { executeInterceptor(appLogLvl, 
buffer); } } ================================================ FILE: C/common/management_client.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2017-2021 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <management_client.h> #include <rapidjson/document.h> #include <service_record.h> #include <string_utils.h> #include <asset_tracking.h> #include <bearer_token.h> #include <crypto.hpp> #include <rapidjson/error/en.h> using namespace std; using namespace rapidjson; using namespace SimpleWeb; using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>; /** * Management Client constructor. Creates a class used to send management API requests * from a micro service to the Fledge core service. * * The parameters required here are passed to new services and tasks using the --address= * and --port= arguments when the service is started. * * @param hostname The hostname of the Fledge core micro service * @param port The port of the management service API listener in the Fledge core */ ManagementClient::ManagementClient(const string& hostname, const unsigned short port) : m_uuid(0) { ostringstream urlbase; m_logger = Logger::getLogger(); m_urlbase << hostname << ":" << port; } /** * Destructor for management client */ ManagementClient::~ManagementClient() { std::map<std::thread::id, HttpClient *>::iterator item; if (m_uuid) { delete m_uuid; m_uuid = 0; } // Deletes all the HttpClient objects created in the map for (item = m_client_map.begin() ; item != m_client_map.end() ; ++item) { delete item->second; } } /** * Creates a HttpClient object for each thread * it stores/retrieves the reference to the HttpClient and the associated thread id in a map * * @return HttpClient The HTTP client connection to the core */ HttpClient *ManagementClient::getHttpClient() { std::map<std::thread::id, HttpClient *>::iterator item; HttpClient *client; std::thread::id thread_id = 
std::this_thread::get_id();

	// Serialise access to the per-thread client map
	m_mtx_client_map.lock();
	item = m_client_map.find(thread_id);
	if (item == m_client_map.end() )
	{
		// Adding a new HttpClient
		client = new HttpClient(m_urlbase.str());
		m_client_map[thread_id] = client;
	}
	else
	{
		client = item->second;
	}
	m_mtx_client_map.unlock();
	return (client);
}

/**
 * Register this service with the Fledge core
 *
 * On success the UUID assigned by the core is stored in m_uuid and, if
 * the core issued one, the service bearer token is kept in m_bearer_token.
 *
 * @param service	The service record of this service
 * @return bool		True if the service registration was successful
 */
bool ManagementClient::registerService(const ServiceRecord& service)
{
	string payload;
	try {
		service.asJSON(payload);
		auto res = this->getHttpClient()->request("POST", "/fledge/service", payload);
		Document doc;
		string response = res->content.string();
		doc.Parse(response.c_str());
		if (doc.HasParseError())
		{
			// Heuristic: a reply of the form "NNN:..." is an HTTP error status
			// NOTE(review): response[0..3] are read without checking
			// response.length(); a reply shorter than 4 characters would
			// index past the end — confirm replies are never that short
			bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':');
			m_logger->error("%s service registration: %s\n",
					httpError?"HTTP error during":"Failed to parse result of",
					response.c_str());
			return false;
		}
		if (doc.HasMember("id"))
		{
			// The core replies with the UUID assigned to this service
			m_uuid = new string(doc["id"].GetString());
			m_logger->info("Registered service '%s' with UUID %s.\n",
					service.getName().c_str(),
					m_uuid->c_str());
			if (doc.HasMember("bearer_token")){
				m_bearer_token = string(doc["bearer_token"].GetString());
#ifdef DEBUG_BEARER_TOKEN
				m_logger->debug("Bearer token issued for service '%s': %s",
						service.getName().c_str(),
						m_bearer_token.c_str());
#endif
			}
			return true;
		}
		else if (doc.HasMember("message"))
		{
			m_logger->error("Failed to register service: %s.",
					doc["message"].GetString());
		}
		else
		{
			m_logger->error("Unexpected result from service registration %s",
					response.c_str());
		}
	} catch (const SimpleWeb::system_error &e) {
		m_logger->error("Register service failed %s.", e.what());
		return false;
	}
	return false;
}

/**
 * Unregister this service with the Fledge core
 *
 * @return bool	True if the service successfully unregistered
 */
bool ManagementClient::unregisterService()
{
	if (!m_uuid)
	{
		return
false;	// Not registered
	}
	try {
		string url = "/fledge/service/";
		url += urlEncode(*m_uuid);
		auto res = this->getHttpClient()->request("DELETE", url.c_str());
		Document doc;
		string response = res->content.string();
		doc.Parse(response.c_str());
		if (doc.HasParseError())
		{
			// "NNN:..." replies denote HTTP error status codes
			bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':');
			m_logger->error("%s service unregistration: %s\n",
					httpError?"HTTP error during":"Failed to parse result of",
					response.c_str());
			return false;
		}
		if (doc.HasMember("id"))
		{
			// Replace the stored UUID with the one echoed by the core
			delete m_uuid;
			m_uuid = new string(doc["id"].GetString());
			m_logger->info("Unregistered service %s.\n", m_uuid->c_str());
			return true;
		}
		else if (doc.HasMember("message"))
		{
			m_logger->error("Failed to unregister service: %s.",
					doc["message"].GetString());
		}
	} catch (const SimpleWeb::system_error &e) {
		m_logger->error("Unregister service failed %s.", e.what());
		return false;
	}
	return false;
}

/**
 * Restart this service by sending a request to the Fledge core
 *
 * @return bool	True if the service successfully requested restart
 */
bool ManagementClient::restartService()
{
	if (!m_uuid)
	{
		return false;	// Not registered
	}
	try {
		string url = "/fledge/service/";
		url += urlEncode(*m_uuid);
		url += "/restart";
		auto res = this->getHttpClient()->request("PUT", url.c_str());
		Document doc;
		string response = res->content.string();
		doc.Parse(response.c_str());
		if (doc.HasParseError())
		{
			bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':');
			m_logger->error("%s service restart: %s\n",
					httpError?"HTTP error during":"Failed to parse result of",
					response.c_str());
			return false;
		}
		if (doc.HasMember("id"))
		{
			delete m_uuid;
			m_uuid = new string(doc["id"].GetString());
			m_logger->info("Restart service %s.\n", m_uuid->c_str());
			return true;
		}
		else if (doc.HasMember("message"))
		{
			m_logger->error("Failed to restart service: %s.",
					doc["message"].GetString());
		}
	} catch (const SimpleWeb::system_error &e) {
		m_logger->error("Restart service failed %s.", e.what());
		return false;
	}
	return false;
}

/**
 * Get the specified service. Supplied with a service
 * record that must either have the name or the type fields populated.
 * The call will populate the other fields of the service record.
 *
 * Note, if multiple service records match then only the first will be
 * returned.
 *
 * @param service	A partially filled service record that will be completed
 * @return bool		Return true if the service record was found
 */
bool ManagementClient::getService(ServiceRecord& service)
{
	string payload;
	try {
		string url = "/fledge/service";
		if (!service.getName().empty())
		{
			url += "?name=" + urlEncode(service.getName());
		}
		else if (!service.getType().empty())
		{
			url += "?type=" + urlEncode(service.getType());
		}
		auto res = this->getHttpClient()->request("GET", url.c_str());
		Document doc;
		string response = res->content.string();
		doc.Parse(response.c_str());
		if (doc.HasParseError())
		{
			bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':');
			m_logger->error("%s fetching service record: %s\n",
					httpError?"HTTP error while":"Failed to parse result of",
					response.c_str());
			return false;
		}
		else if (doc.HasMember("message"))
		{
			// NOTE(review): this log text says "register" but the function
			// fetches a service record — the literal looks copy-pasted
			m_logger->error("Failed to register service: %s.",
					doc["message"].GetString());
			return false;
		}
		else
		{
			// NOTE(review): doc["services"][0] is accessed without checking
			// that "services" exists and is a non-empty array — confirm the
			// core always returns at least one record on this path
			Value& serviceRecord = doc["services"][0];
			service.setAddress(serviceRecord["address"].GetString());
			service.setPort(serviceRecord["service_port"].GetInt());
			service.setProtocol(serviceRecord["protocol"].GetString());
			service.setManagementPort(serviceRecord["management_port"].GetInt());
			return true;
		}
	} catch (const SimpleWeb::system_error &e) {
		m_logger->error("Get service failed %s.", e.what());
		return false;
	}
	return false;
}

/**
 * Return all services registered with the Fledge core
 *
 * @param services	A vector of service records that will be populated
 * @return bool		True if the vector was populated
 */
bool
ManagementClient::getServices(vector<ServiceRecord *>& services) { string payload; try { string url = "/fledge/service"; auto res = this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetching service record: %s\n", httpError?"HTTP error while":"Failed to parse result of", response.c_str()); return false; } else if (doc.HasMember("message")) { m_logger->error("Failed to register service: %s.", doc["message"].GetString()); return false; } else { Value& records = doc["services"]; for (auto& serviceRecord : records.GetArray()) { ServiceRecord *service = new ServiceRecord(serviceRecord["name"].GetString(), serviceRecord["type"].GetString()); service->setAddress(serviceRecord["address"].GetString()); service->setPort(serviceRecord["service_port"].GetInt()); service->setProtocol(serviceRecord["protocol"].GetString()); service->setManagementPort(serviceRecord["management_port"].GetInt()); services.push_back(service); } return true; } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get services failed %s.", e.what()); return false; } return false; } /** * Return all services registered with the Fledge core of a specified type * * @param services A vector of service records that will be populated * @param type The type of services to return * @return bool True if the vecgtor was populated */ bool ManagementClient::getServices(vector<ServiceRecord *>& services, const string& type) { string payload; try { string url = "/fledge/service?type="; url += type; auto res = this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && 
response[3]==':'); m_logger->error("%s fetching service record: %s\n", httpError?"HTTP error while":"Failed to parse result of", response.c_str()); return false; } else if (doc.HasMember("message")) { m_logger->error("Failed to register service: %s.", doc["message"].GetString()); return false; } else { Value& records = doc["services"]; for (auto& serviceRecord : records.GetArray()) { ServiceRecord *service = new ServiceRecord(serviceRecord["name"].GetString(), serviceRecord["type"].GetString()); service->setAddress(serviceRecord["address"].GetString()); service->setPort(serviceRecord["service_port"].GetInt()); service->setProtocol(serviceRecord["protocol"].GetString()); service->setManagementPort(serviceRecord["management_port"].GetInt()); services.push_back(service); } return true; } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get services failed %s.", e.what()); return false; } return false; } /** * Register interest in a configuration category. The service will be called * with the updated configuration category whenever an item in the category * is added, removed or changed. 
 *
 * @param category	The name of the category to register
 * @return bool		True if the registration was successful
 */
bool ManagementClient::registerCategoryChild(const string& category)
{
	ostringstream convert;

	if (m_uuid == 0)
	{
		// Not registered with core
		// NOTE(review): this path logs an error but returns true —
		// presumably to let the caller continue; confirm that is intended
		m_logger->error("Service is not registered with the core - not registering configuration interest");
		return true;
	}
	try {
		// Build the interest registration payload; "child" marks interest
		// in the child categories of the named category
		convert << "{ \"category\" : \"" << JSONescape(category) << "\", ";
		convert << "\"child\" : \"" << "True" << "\", ";
		convert << "\"service\" : \"" << *m_uuid << "\" }";
		auto res = this->getHttpClient()->request("POST", "/fledge/interest", convert.str());
		Document doc;
		string content = res->content.string();
		doc.Parse(content.c_str());
		if (doc.HasParseError())
		{
			// "NNN:..." replies denote HTTP error status codes
			bool httpError = (isdigit(content[0]) && isdigit(content[1]) && isdigit(content[2]) && content[3]==':');
			m_logger->error("%s child category registration: %s\n",
					httpError?"HTTP error during":"Failed to parse result of",
					content.c_str());
			return false;
		}
		if (doc.HasMember("id"))
		{
			// Keep the registration id so the interest can be removed later
			const char *reg_id = doc["id"].GetString();
			m_categories[category] = string(reg_id);
			m_logger->info("Registered child configuration category %s, registration id %s.",
					category.c_str(), reg_id);
			return true;
		}
		else if (doc.HasMember("message"))
		{
			m_logger->error("Failed to register child configuration category: %s.",
					doc["message"].GetString());
		}
		else
		{
			m_logger->error("Failed to register child configuration category: %s.",
					content.c_str());
		}
	} catch (const SimpleWeb::system_error &e) {
		m_logger->error("Register child configuration category failed %s.", e.what());
		return false;
	}
	return false;
}

/**
 * Register interest in a configuration category
 *
 * @param category	The name of the configuration category to register
 * @return bool		True if the configuration category has been registered
 */
bool ManagementClient::registerCategory(const string& category)
{
	ostringstream convert;

	if (m_uuid == 0)
	{
		// Not registered with core
		// NOTE(review): as above, an error is logged but true is returned
		m_logger->error("Service is not
registered with the core - not registering configuration interest");
		return true;
	}
	try {
		convert << "{ \"category\" : \"" << JSONescape(category) << "\", ";
		convert << "\"service\" : \"" << *m_uuid << "\" }";
		auto res = this->getHttpClient()->request("POST", "/fledge/interest", convert.str());
		Document doc;
		string content = res->content.string();
		doc.Parse(content.c_str());
		if (doc.HasParseError())
		{
			bool httpError = (isdigit(content[0]) && isdigit(content[1]) && isdigit(content[2]) && content[3]==':');
			m_logger->error("%s category registration: %s\n",
					httpError?"HTTP error during":"Failed to parse result of",
					content.c_str());
			return false;
		}
		if (doc.HasMember("id"))
		{
			// Keep the registration id so the interest can be removed later
			const char *reg_id = doc["id"].GetString();
			m_categories[category] = string(reg_id);
			m_logger->info("Registered configuration category %s, registration id %s.",
					category.c_str(), reg_id);
			return true;
		}
		else if (doc.HasMember("message"))
		{
			m_logger->error("Failed to register configuration category: %s.",
					doc["message"].GetString());
		}
		else
		{
			m_logger->error("Failed to register configuration category: %s.",
					content.c_str());
		}
	} catch (const SimpleWeb::system_error &e) {
		m_logger->error("Register configuration category failed %s.", e.what());
		return false;
	}
	return false;
}

/**
 * Unregister interest in a configuration category. The service will no
 * longer be called when the configuration category is changed.
* * @param category The name of the configuration category to unregister * @return bool True if the configuration category is unregistered */ bool ManagementClient::unregisterCategory(const string& category) { ostringstream convert; try { string url = "/fledge/interest/"; url += urlEncode(m_categories[category]); auto res = this->getHttpClient()->request("DELETE", url.c_str()); } catch (const SimpleWeb::system_error &e) { m_logger->error("Unregister configuration category failed %s.", e.what()); return false; } return false; } /** * Get the set of all configuration categories from the core micro service. * * @return ConfigCategories The set of all confguration categories */ ConfigCategories ManagementClient::getCategories() { try { string url = "/fledge/service/category"; auto res = this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetching configuration categories: %s\n", httpError?"HTTP error while":"Failed to parse result of", response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to fetch configuration categories: %s.", doc["message"].GetString()); throw new exception(); } else { return ConfigCategories(response); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get config categories failed %s.", e.what()); throw; } } /** * Return the content of the named category by calling the * management API of the Fledge core. 
* * @param categoryName The name of the categpry to return * @return ConfigCategory The configuration category * @throw exception If the category does not exist or * the result can not be parsed */ ConfigCategory ManagementClient::getCategory(const string& categoryName) { try { string url = "/fledge/service/category/" + urlEncode(categoryName); auto res = this->getHttpClient()->request("GET", url.c_str()); string response = res->content.string(); if (res->status_code.compare("200 OK") == 0) { return ConfigCategory(categoryName, response); } Document doc; doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetching configuration category for %s: %s\n", httpError?"HTTP error while":"Failed to parse result of", categoryName.c_str(), response.c_str()); throw new exception(); } else if (doc.HasMember("message") && doc["message"].IsString()) { m_logger->error("Failed to fetch configuration category: %s.", doc["message"].GetString()); throw new exception(); } else { m_logger->error("Failed to fetch configuration category: %s.", response.c_str()); throw new exception(); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get config category failed %s.", e.what()); throw; } } /** * Set a category configuration item value * * @param categoryName The given category name * @param itemName The given item name * @param itemValue The item value to set * @return JSON string of the updated * category item * @throw std::exception */ string ManagementClient::setCategoryItemValue(const string& categoryName, const string& itemName, const string& itemValue) { try { string url = "/fledge/service/category/" + urlEncode(categoryName) + "/" + urlEncode(itemName); string payload = "{ \"value\" : \"" + itemValue + "\" }"; auto res = this->getHttpClient()->request("PUT", url.c_str(), payload); Document doc; string response = res->content.string(); if 
(res->status_code.compare("200 OK") == 0) { return response; } doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s setting configuration category item value: %s\n", httpError?"HTTP error while":"Failed to parse result of", response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to set configuration category item value: %s.", doc["message"].GetString()); throw new exception(); } else { m_logger->error("Failed to set configuration category item value: %s.", response.c_str()); throw new exception(); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get config category failed %s.", e.what()); throw; } } /** * Return child categories of a given category * * @param categoryName The given category name * @return JSON string with current child categories * @throw std::exception */ ConfigCategories ManagementClient::getChildCategories(const string& categoryName) { try { string url = "/fledge/service/category/" + urlEncode(categoryName) + "/children"; auto res = this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetching child categories of %s: %s\n", httpError?"HTTP error while":"Failed to parse result of", categoryName.c_str(), response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to fetch child categories of %s: %s.", categoryName.c_str(), doc["message"].GetString()); throw new exception(); } else { return ConfigCategories(response); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get child categories of %s failed %s.", categoryName.c_str(), e.what()); throw; } } /** * Add 
child categories to a (parent) category * * @param parentCategory The given category name * @param children Categories to add under parent * @return JSON string with current child categories * @throw std::exception */ string ManagementClient::addChildCategories(const string& parentCategory, const vector<string>& children) { try { string url = "/fledge/service/category/" + urlEncode(parentCategory) + "/children"; string payload = "{ \"children\" : ["; for (auto it = children.begin(); it != children.end(); ++it) { payload += "\"" + JSONescape((*it)) + "\""; if ((it + 1) != children.end()) { payload += ", "; } } payload += "] }"; auto res = this->getHttpClient()->request("POST", url.c_str(), payload); string response = res->content.string(); Document doc; doc.Parse(response.c_str()); if (doc.HasParseError() || !doc.HasMember("children")) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s adding child categories: %s\n", httpError?"HTTP error while":"Failed to parse result of", response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to add child categories: %s.", doc["message"].GetString()); throw new exception(); } else { return response; } } catch (const SimpleWeb::system_error &e) { m_logger->error("Add child categories failed %s.", e.what()); throw; } } /** * Get the asset tracking tuples * for a service or all services * * @param serviceName The serviceName to restrict data fetch * If empty records for all services are fetched * @return A vector of pointers to AssetTrackingTuple objects allocated on heap */ std::vector<AssetTrackingTuple*>& ManagementClient::getAssetTrackingTuples(const std::string serviceName) { std::vector<AssetTrackingTuple*> *vec = new std::vector<AssetTrackingTuple*>(); try { string url = "/fledge/track"; if (serviceName != "") { url += "?service="+urlEncode(serviceName); } auto res = 
this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetch asset tracking tuples: %s\n", httpError?"HTTP error during":"Failed to parse result of", response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to fetch asset tracking tuples: %s.", doc["message"].GetString()); throw new exception(); } else { const rapidjson::Value& trackArray = doc["track"]; if (trackArray.IsArray()) { // Process every row and create the AssetTrackingTuple object for (auto& rec : trackArray.GetArray()) { if (!rec.IsObject()) { throw runtime_error("Expected asset tracker tuple to be an object"); } // Do not load "store" events as they bill be loaded by getStorageAssetTrackingTuples() if (rec["event"].GetString() == "store") { continue; } // Note: deprecatedTimestamp NULL value is returned as "" // otherwise it's a string DATE bool deprecated = rec.HasMember("deprecatedTimestamp") && strlen(rec["deprecatedTimestamp"].GetString()); AssetTrackingTuple *tuple = new AssetTrackingTuple(rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated); m_logger->debug("Adding AssetTracker tuple for service %s: %s:%s:%s, " \ "deprecated state is %d", rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated); vec->push_back(tuple); } } else { throw runtime_error("Expected array of rows in asset track tuples array"); } return (*vec); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Fetch/parse of asset tracking tuples for service %s failed: %s.", serviceName.c_str(), e.what()); //throw; } catch (...) 
{ m_logger->error("Unexpected exception when retrieving asset tuples for service %s:, serviceName.c_str()"); } return *vec; } /** * Add a new asset tracking tuple * * @param service Service name * @param plugin Plugin name * @param asset Asset name * @param event Event type * @return whether operation was successful */ bool ManagementClient::addAssetTrackingTuple(const std::string& service, const std::string& plugin, const std::string& asset, const std::string& event) { ostringstream convert; try { convert << "{ \"service\" : \"" << JSONescape(service) << "\", "; convert << " \"plugin\" : \"" << plugin << "\", "; convert << " \"asset\" : \"" << asset << "\", "; convert << " \"event\" : \"" << event << "\" }"; auto res = this->getHttpClient()->request("POST", "/fledge/track", convert.str()); Document doc; string content = res->content.string(); doc.Parse(content.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(content[0]) && isdigit(content[1]) && isdigit(content[2]) && content[3]==':'); m_logger->error("%s asset tracking tuple addition: %s\n", httpError?"HTTP error during":"Failed to parse result of", content.c_str()); return false; } if (doc.HasMember("fledge")) { const char *reg_id = doc["fledge"].GetString(); return true; } else if (doc.HasMember("message")) { m_logger->error("Failed to add asset tracking tuple: %s.", doc["message"].GetString()); } else { m_logger->error("Failed to add asset tracking tuple: %s.", content.c_str()); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Failed to add asset tracking tuple: %s.", e.what()); return false; } return false; } /** * Add an Audit Entry. Called when an auditable event occurs * to regsiter that event. 
* * Fledge API call example : * * curl -X POST -d '{"source":"LMTR", "severity":"WARNING", * "details":{"message":"Engine oil pressure low"}}' * http://localhost:8081/fledge/audit * * @param code The log code for the entry * @param severity The severity level * @param message The JSON message to log */ bool ManagementClient::addAuditEntry(const std::string& code, const std::string& severity, const std::string& message) { ostringstream convert; try { convert << "{ \"source\" : \"" << code << "\", "; convert << " \"severity\" : \"" << severity << "\", "; convert << " \"details\" : " << message << " }"; auto res = this->getHttpClient()->request("POST", "/fledge/audit", convert.str()); Document doc; string content = res->content.string(); doc.Parse(content.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(content[0]) && isdigit(content[1]) && isdigit(content[2]) && content[3]==':'); m_logger->error("%s audit entry: %s\n", (httpError ? "HTTP error during" : "Failed to parse result of"), content.c_str()); return false; } bool ret = false; // Check server reply if (doc.HasMember("source")) { // OK ret = true; } else if (doc.HasMember("message")) { // Erropr m_logger->error("Failed to add audit entry: %s.", doc["message"].GetString()); } else { // Erropr m_logger->error("Failed to add audit entry: %s.", content.c_str()); } return ret; } catch (const SimpleWeb::system_error &e) { m_logger->error("Failed to add audit entry: %s.", e.what()); return false; } return false; } /** * Checks and validate the JWT bearer token object as reference * * @param request The bearer token object * @return True on success, false otherwise */ bool ManagementClient::verifyAccessBearerToken(BearerToken& token) { if (!token.exists()) { m_logger->warn("Access bearer token has empty value"); return false; } return verifyBearerToken(token); } /** * Checks and validate the JWT bearer token coming from HTTP request * * @param request HTTP request object * @return True on success, false 
otherwise */ bool ManagementClient::verifyAccessBearerToken(shared_ptr<HttpServer::Request> request) { BearerToken bT(request); return this->verifyBearerToken(bT); } /** * Refresh the JWT bearer token string * * @param currentToken Current bearer token * @param newToken New issued bearer token being set * @return True on success, false otherwise */ bool ManagementClient::refreshBearerToken(const string& currentToken, string& newToken) { if (currentToken.length() == 0) { newToken.clear(); return false; } bool ret = false; // Refresh it by calling Fledge management endpoint string url = "/fledge/service/refresh_token"; string payload; SimpleWeb::CaseInsensitiveMultimap header; header.emplace("Authorization", "Bearer " + currentToken); auto res = this->getHttpClient()->request("POST", url.c_str(), payload, header); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s error in service token refresh: %s\n", httpError?"HTTP error during":"Failed to parse result of", response.c_str()); ret = false; } else { if (doc.HasMember("error")) { if (doc["error"].IsString()) { string error = doc["error"].GetString(); m_logger->error("Failed to refresh refresh bearer token, error %s", error.c_str()); } else { m_logger->error("Failed to refresh beearer token result: %s", response.c_str()); } ret = false; } else if (doc.HasMember("bearer_token")) { // Set new token newToken = doc["bearer_token"].GetString(); ret = true; } else { m_logger->error("Bearer token not found in token refresh result: %s", response.c_str()); ret = false; } } m_mtx_rTokens.lock(); if (ret) { // Remove old token from received ones m_received_tokens.erase(currentToken); } else { newToken.clear(); } m_mtx_rTokens.unlock(); return ret; } /** * Checks and validate the JWT bearer token string * * Input token internal data will be set 
* with new values or cached ones * * @param bearerToken The bearer token object * @return True on success, false otherwise */ bool ManagementClient::verifyBearerToken(BearerToken& bearerToken) { if (!bearerToken.exists()) { m_logger->warn("Bearer token has empty value"); return false; } bool ret = true; const string& token = bearerToken.token(); // Check token already exists in cache: map<string, BearerToken>::iterator item; // Acquire lock m_mtx_rTokens.lock(); item = m_received_tokens.find(token); if (item == m_received_tokens.end()) { // Token is not in the cache bool verified = false; // Token does not exist: // Verify it by calling Fledge management endpoint string url = "/fledge/service/verify_token"; string payload; SimpleWeb::CaseInsensitiveMultimap header; header.emplace("Authorization", "Bearer " + token); auto res = this->getHttpClient()->request("POST", url.c_str(), payload, header); string response = res->content.string(); // Parse JSON message and store claims in input token object verified = bearerToken.verify(response); if (verified) { // Token verified, store the token object m_received_tokens.emplace(token, bearerToken); } else { ret = false; m_logger->error("Micro service bearer token '%s' not verified.", token.c_str()); } #ifdef DEBUG_BEARER_TOKEN m_logger->debug("New token verified by core API endpoint %d, claims %s:%s:%s:%ld", ret, bearerToken.getAudience().c_str(), bearerToken.getSubject().c_str(), bearerToken.getIssuer().c_str(), bearerToken.getExpiration()); #endif } else { // Token is in the cache unsigned long expiration = (*item).second.getExpiration(); unsigned long now = time(NULL); // Check expiration if (now >= expiration) { ret = false; // Remove token from received ones m_received_tokens.erase(token); m_logger->error("Micro service bearer token expired."); } // Set input token object as per cached data bearerToken = (*item).second; #ifdef DEBUG_BEARER_TOKEN m_logger->debug("Existing token already verified %d, claims %s:%s:%s:%ld", 
ret, (*item).second.getAudience().c_str(), (*item).second.getSubject().c_str(), (*item).second.getIssuer().c_str(), (*item).second.getExpiration()); #endif } // Release lock m_mtx_rTokens.unlock(); return ret; } /** * Request that the core proxy a URL to the service. URL's in the public Fledge API will be forwarded * to the service API of the named service. * * @param serviceName The name of the service to send the request to * @param operation The type of operations; post, put, get or delete * @param publicEndpoint The URL inthe Fledge public API to be proxied * @param privateEnpoint The URL in the service API of the named service to which the reuests will be proxied. * @return bool True if the proxy request was accepted */ bool ManagementClient::addProxy(const std::string& serviceName, const std::string& operation, const std::string& publicEndpoint, const std::string& privateEndpoint) { ostringstream convert; try { convert << "{ \"" << operation << "\" : { "; convert << "\"" << publicEndpoint << "\" : "; convert << "\"" << privateEndpoint << "\" } "; convert << "\"service_name\" : \"" << serviceName << "\" }"; auto res = this->getHttpClient()->request("POST", "/fledge/proxy", convert.str()); Document doc; string content = res->content.string(); doc.Parse(content.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(content[0]) && isdigit(content[1]) && isdigit(content[2]) && content[3]==':'); m_logger->error("%s proxy addition: %s\n", (httpError ? "HTTP error during" : "Failed to parse result of"), content.c_str()); return false; } bool result = false; if (res->status_code[0] == '2') // A 2xx response { result = true; } if (doc.HasMember("message")) { m_logger->error("Add proxy entry: %s.", doc["message"].GetString()); return result; } return result; } catch (const SimpleWeb::system_error &e) { m_logger->error("Failed to add proxt entry: %s.", e.what()); return false; } return false; } /** * Request that the core proxy a URL to the service. 
URL's in the public Fledge API will be forwarded * to the service API of the named service. * * @param serviceName The name of the service to send the request to * @param endpoints The set of endpoints to be mapped * @return bool True if the proxy request was accepted */ bool ManagementClient::addProxy(const std::string& serviceName, const map<std::string, vector<pair<string, string> > >& endpoints) { ostringstream convert; try { convert << "{ "; for (auto const& op : endpoints) { convert << "\"" << op.first << "\" : { "; bool first = true; for (auto const& ep : op.second) { if (!first) convert << ", "; first = false; convert << "\"" << ep.first << "\" :"; convert << "\"" << ep.second << "\""; } convert << "}, "; } convert << "\"service_name\" : \"" << serviceName << "\" }"; auto res = this->getHttpClient()->request("POST", "/fledge/proxy", convert.str()); Document doc; string content = res->content.string(); doc.Parse(content.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(content[0]) && isdigit(content[1]) && isdigit(content[2]) && content[3]==':'); m_logger->error("%s proxy addition: %s\n", (httpError ? "HTTP error during" : "Failed to parse result of"), content.c_str()); return false; } bool result = false; if (res->status_code[0] == '2') // A 2xx response { result = true; } if (doc.HasMember("message")) { m_logger->error("Add proxy entries: %s.", doc["message"].GetString()); return result; } return result; } catch (const SimpleWeb::system_error &e) { m_logger->error("Failed to add proxy entry: %s.", e.what()); return false; } return false; } /** * Delete the current proxy endpoitn for the named service. Normally called prior * to the service shutting down. 
* * @param serviceName THe name of the service to sto the proxying for * @return bool True if the request succeeded */ bool ManagementClient::deleteProxy(const std::string& serviceName) { bool result = false; try { string url = "/fledge/proxy/"; url += urlEncode(serviceName); auto res = this->getHttpClient()->request("DELETE", url.c_str()); if (res->status_code[0] == '2') // A 2xx response { result = true;; } Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s service proxy deletion: %s\n", httpError?"HTTP error during":"Failed to parse result of", response.c_str()); return result; } else if (doc.HasMember("message")) { m_logger->error("Stop proxy of endpoints for service: %s.", doc["message"].GetString()); return result; } else { m_logger->info("API proxying has been stopped"); return result; } } catch (const SimpleWeb::system_error &e) { m_logger->error("Proxy deletion failed %s.", e.what()); return false; } return false; } /** * Get the asset tracking tuple * for a service and asset name * * @param serviceName The serviceName to restrict data fetch * @param assetName The asset name that belongs to the service * @param event The associated event type * @return A vector of pointers to AssetTrackingTuple objects allocated on heap */ AssetTrackingTuple* ManagementClient::getAssetTrackingTuple(const std::string& serviceName, const std::string& assetName, const std::string& event) { AssetTrackingTuple* tuple = NULL; try { string url = "/fledge/track"; if (serviceName == "" && assetName == "" && event == "") { m_logger->error("Failed to fetch asset tracking tuple: " \ "service name, asset name and event type are required."); throw new exception(); } url += "?service=" + urlEncode(serviceName); url += "&asset=" + urlEncode(assetName) + "&event=" + event; auto res = 
this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetch asset tracking tuple: %s\n", httpError?"HTTP error during":"Failed to parse result of", response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to fetch asset tracking tuple: %s.", doc["message"].GetString()); throw new exception(); } else { const rapidjson::Value& trackArray = doc["track"]; if (trackArray.IsArray()) { // Process every row and create the AssetTrackingTuple object for (auto& rec : trackArray.GetArray()) { if (!rec.IsObject()) { throw runtime_error("Expected asset tracker tuple to be an object"); } // Note: deprecatedTimestamp NULL value is returned as "" // otherwise it's a string DATE bool deprecated = rec.HasMember("deprecatedTimestamp") && strlen(rec["deprecatedTimestamp"].GetString()); // Create a new AssetTrackingTuple object, to be freed by the caller tuple = new AssetTrackingTuple(rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated); m_logger->debug("Adding AssetTracker tuple for service %s: %s:%s:%s, " \ "deprecated state is %d", rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated); } } else { throw runtime_error("Expected array of rows in asset track tuples array"); } return tuple; } } catch (const SimpleWeb::system_error &e) { m_logger->error("Fetch/parse of asset tracking tuples for service %s failed: %s.", serviceName.c_str(), e.what()); } catch (...) 
{ m_logger->error("Unexpected exception when retrieving asset tuples for service %s", serviceName.c_str()); } return tuple; } /** * Get the asset tracking tuples for all the deprecated assets * * @return A vector of pointers to AssetTrackingTuple objects allocated on heap */ AssetTrackingTable* ManagementClient::getDeprecatedAssetTrackingTuples() { AssetTrackingTable* table = NULL; try { string url = "/fledge/track?deprecated=true"; auto res = this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetch asset tracking tuple: %s\n", httpError?"HTTP error during":"Failed to parse result of", response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to fetch asset tracking tuple: %s.", doc["message"].GetString()); throw new exception(); } else { const rapidjson::Value& trackArray = doc["track"]; if (trackArray.IsArray()) { table = new AssetTrackingTable(); // Process every row and create the AssetTrackingTuple object for (auto& rec : trackArray.GetArray()) { if (!rec.IsObject()) { throw runtime_error("Expected asset tracker tuple to be an object"); } // Note: deprecatedTimestamp NULL value is returned as "" // otherwise it's a string DATE bool deprecated = rec.HasMember("deprecatedTimestamp") && strlen(rec["deprecatedTimestamp"].GetString()); // Create a new AssetTrackingTuple object, to be freed by the caller AssetTrackingTuple *tuple = new AssetTrackingTuple(rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated); m_logger->debug("Adding AssetTracker tuple for service %s: %s:%s:%s, " \ "deprecated state is %d", rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), 
deprecated); table->add(tuple); } } else { throw runtime_error("Expected array of rows in asset track tuples array"); } return table; } } catch (const SimpleWeb::system_error &e) { m_logger->error("Fetch/parse of deprecated asset tracking tuples failed: %s.", e.what()); } catch (...) { m_logger->error("Unexpected exception when retrieving asset tuples for deprecated assets"); } return table; } /** * Return the content of the named ACL by calling the * management API of the Fledge core. * * @param aclName The name of the ACL to return * @return ACL The ACL class * @throw exception If the ACL does not exist or * the JSON result can not be parsed */ ACL ManagementClient::getACL(const string& aclName) { try { string url = "/fledge/ACL/" + urlEncode(aclName); auto res = this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetching ACL for %s: %s\n", httpError?"HTTP error while":"Failed to parse result of", aclName.c_str(), response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to fetch ACL: %s.", doc["message"].GetString()); throw new exception(); } else { // Success return ACL(response); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Get ACL failed %s.", e.what()); throw; } } /** * Get the asset tracking tuple * for a service and asset name * * @param serviceName The serviceName to restrict data fetch * @param assetName The asset name that belongs to the service * @param event The associated event type * @param dp The datapoints Type * @param c The count of datapoints * @return A pointer to AssetTrackingTuple objects allocated on heap */ StorageAssetTrackingTuple* ManagementClient::getStorageAssetTrackingTuple(const std::string& serviceName, const std::string& 
assetName, const std::string& event, const std::string& dp, const unsigned int& c) { StorageAssetTrackingTuple* tuple = NULL; try { string url = "/fledge/track"; if (serviceName == "" || assetName == "" || event == "") { m_logger->error("Failed to fetch storage asset tracking tuple: " \ "service name, asset name and event type are required."); throw new exception(); } url += "?service=" + urlEncode(serviceName); url += "&asset=" + urlEncode(assetName) + "&event=" + event; auto res = this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetch storage asset tracking tuple: %s\n", httpError?"HTTP error during":"Failed to parse result of", response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to fetch storage asset tracking tuple: %s.", doc["message"].GetString()); throw new exception(); } else { const rapidjson::Value& trackArray = doc["track"]; if (trackArray.IsArray()) { // Process every row and create the AssetTrackingTuple object for (auto& rec : trackArray.GetArray()) { m_logger->debug("%s:%d Inside for loop of trackArray ", __FUNCTION__, __LINE__); if (!rec.IsObject()) { throw runtime_error("Expected storage asset tracker tuple to be an object"); } // Note: deprecatedTimestamp NULL value is returned as "" // otherwise it's a string DATE bool deprecated = rec.HasMember("deprecatedTimestamp") && strlen(rec["deprecatedTimestamp"].GetString()); std::string data ; if (!rec.HasMember("data")) { throw runtime_error("Expected storage asset tracker tuple to contain member data"); } const rapidjson::Value& dataVal = rec["data"]; if (!dataVal.IsObject()) { throw runtime_error("Expected data in storage asset tracker tuple to be an object"); } if (!dataVal.HasMember("datapoints")) { throw 
runtime_error("Expected asset tracker tuple to contain datapoints"); } if (dataVal.ObjectEmpty()) { m_logger->error("%s:%d dataVal Object empty " , __FUNCTION__, __LINE__); continue; } if (!dataVal["datapoints"].IsArray()) { throw runtime_error("Expected datapoints to be object"); } std::string datapoints; for (auto& r : dataVal["datapoints"].GetArray()) { if (!r.IsString()) { throw runtime_error("Expected r to be string"); } else { datapoints.append(r.GetString()); datapoints.append(","); } } if (datapoints[datapoints.size()-1] == ',') { datapoints.pop_back(); } if(validateDatapoints(dp,datapoints)) { //datapoints in db not same as in arg, continue m_logger->debug("%s:%d :Datapoints in db not same as in arg", __FUNCTION__, __LINE__); continue; } if (!dataVal.HasMember("count")) { throw runtime_error("Expected asset tracker tuple to contain count"); } if (!dataVal["count"].IsInt()) { throw runtime_error("Expected count in data to be int"); } int count = dataVal["count"].GetInt(); if ( count != c) { // count not same, continue m_logger->debug("%s:%d :count in db not same as received in arg", __FUNCTION__, __LINE__); continue; } // Create a new AssetTrackingTuple object, to be freed by the caller tuple = new StorageAssetTrackingTuple(rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated, datapoints, count); m_logger->debug("%s:%d : Adding StorageAssetTracker tuple for service %s: %s:%s:%s, " \ "deprecated state is %d, datapoints %s , count %d",__FUNCTION__, __LINE__, rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated, datapoints.c_str(), count); } } else { throw runtime_error("Expected array of rows in storage asset track tuples array"); } return tuple; } } catch (const SimpleWeb::system_error &e) { m_logger->error("Fetch/parse of storage asset tracking tuples for service %s failed: %s.", serviceName.c_str(), e.what()); } catch (...) 
{ m_logger->error("Unexpected exception when retrieving storage asset tuples for service %s", serviceName.c_str()); } return tuple; } /** * Add a new asset tracking tuple * * @param service Service name * @param plugin Plugin name * @param asset Asset name * @param event Event type * @param deprecated Deprecated or not * @param datapoints Datapoints type * @param count Count Type * @return whether operation was successful */ bool ManagementClient::addStorageAssetTrackingTuple(const std::string& service, const std::string& plugin, const std::string& asset, const std::string& event, const bool& deprecated, const std::string& datapoints, const int& count) { ostringstream convert; std::string d ; for ( int i = 0; i < datapoints.size(); ++i) { if (datapoints[i] == ',') { d.append("\",\""); } else d.append(1,datapoints[i]); } try { convert << "{ \"service\" : \"" << JSONescape(service) << "\", "; convert << " \"plugin\" : \"" << plugin << "\", "; convert << " \"asset\" : \"" << asset << "\", "; convert << " \"event\" : \"" << event << "\", "; convert << " \"deprecated\" :\"" << deprecated << "\", "; convert << " \"data\" : { \"datapoints\" : \[ \"" << d << "\" ], "; convert << " \"count\" : " << count << " } }"; auto res = this->getHttpClient()->request("POST", "/fledge/track", convert.str()); if (res->status_code[0] == '2') // A 2xx response { return true; } Document doc; string content = res->content.string(); doc.Parse(content.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(content[0]) && isdigit(content[1]) && isdigit(content[2]) && content[3]==':'); m_logger->error("%s:%d , %s storage asset tracking tuple addition: %s\n",__FUNCTION__, __LINE__, httpError?"HTTP error during":"Failed to parse result of", content.c_str()); return false; } else if (doc.HasMember("message")) { m_logger->error("%s:%d Failed to add storage asset tracking tuple: %s.",__FUNCTION__, __LINE__, doc["message"].GetString()); } else { m_logger->error("%s:%d Failed to add storage 
asset tracking tuple: %s.",__FUNCTION__, __LINE__, content.c_str()); } } catch (const SimpleWeb::system_error &e) { m_logger->error("%s:%d Failed to add storage asset tracking tuple: %s.",__FUNCTION__, __LINE__, e.what()); return false; } return false; } /** * Get the storage asset tracking tuples * for a service or all services * * @param serviceName The serviceName to restrict data fetch * If empty records for all services are fetched * @return A vector of pointers to AssetTrackingTuple objects allocated on heap */ std::vector<StorageAssetTrackingTuple*>& ManagementClient::getStorageAssetTrackingTuples(const std::string serviceName) { std::vector<StorageAssetTrackingTuple*> *vec = new std::vector<StorageAssetTrackingTuple*>(); try { string url = "/fledge/track"; if (serviceName != "") { url += "?service="+urlEncode(serviceName); } auto res = this->getHttpClient()->request("GET", url.c_str()); Document doc; string response = res->content.string(); doc.Parse(response.c_str()); if (doc.HasParseError()) { bool httpError = (isdigit(response[0]) && isdigit(response[1]) && isdigit(response[2]) && response[3]==':'); m_logger->error("%s fetch asset tracking tuples: %s\n", httpError?"HTTP error during":"Failed to parse result of", response.c_str()); throw new exception(); } else if (doc.HasMember("message")) { m_logger->error("Failed to fetch asset tracking tuples: %s.", doc["message"].GetString()); throw new exception(); } else { const rapidjson::Value& trackArray = doc["track"]; if (trackArray.IsArray()) { // Process every row and create the AssetTrackingTuple object for (auto& rec : trackArray.GetArray()) { if (!rec.IsObject()) { throw runtime_error("Expected asset tracker tuple to be an object"); } // Note: deprecatedTimestamp NULL value is returned as "" // otherwise it's a string DATE bool deprecated = rec.HasMember("deprecatedTimestamp") && strlen(rec["deprecatedTimestamp"].GetString()); std::string data ; if (!rec.HasMember("data")) { throw runtime_error("Expected 
asset tracker tuple to contain member data"); } const rapidjson::Value& dataVal = rec["data"]; if (!dataVal.IsObject()) { throw runtime_error("Expected data asset tracker tuple to be an object"); } if (dataVal.ObjectEmpty()) { m_logger->debug("%s:%d dataVal Object empty " , __FUNCTION__, __LINE__); continue; } if (!dataVal.HasMember("datapoints")) { throw runtime_error("Expected asset tracker tuple to contain datapoints"); } if (!dataVal["datapoints"].IsArray()) { throw runtime_error("Expected datapoints to be array"); } std::string datapoints; for (auto& r : dataVal["datapoints"].GetArray()) { if (!r.IsString()) { throw runtime_error("Expected individual datapoints in datapoints array to be string"); } else { datapoints.append(r.GetString()); datapoints.append(","); } } if( datapoints[datapoints.size()-1] == ',') { datapoints.pop_back(); } if (!dataVal.HasMember("count")) { throw runtime_error("Expected asset tracker tuple to contain count"); } if (!dataVal["count"].IsInt()) { throw runtime_error("Expected count in data to be int"); } int count = dataVal["count"].GetInt(); m_logger->debug("%s:%d count = %d ", __FUNCTION__, __LINE__, count); StorageAssetTrackingTuple *tuple = new StorageAssetTrackingTuple(rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated, datapoints, count); m_logger->debug("%s:%d: Adding StorageAssetTracker tuple for service %s: %s:%s:%s, " \ "deprecated state is %d, datapoints %s , count %d" ,__FUNCTION__, __LINE__, rec["service"].GetString(), rec["plugin"].GetString(), rec["asset"].GetString(), rec["event"].GetString(), deprecated, datapoints.c_str(), count); vec->push_back(tuple); } } else { throw runtime_error("Expected array of rows in asset track tuples array"); } return (*vec); } } catch (const SimpleWeb::system_error &e) { m_logger->error("Fetch/parse of asset tracking tuples for service %s failed: %s.", serviceName.c_str(), e.what()); } catch (...) 
{ m_logger->error("Unexpected exception when retrieving asset tuples for service %s", serviceName.c_str()); } return *vec; } /** * Compare the datapoints to be equal or not, they can be '"' enclosed * * @param dp1 The datapoint to compare, enclosed in '"' * @param dp2 The datapoint to compare * @return int integer depicting result of comparison, 0 on equal */ int ManagementClient::validateDatapoints(std::string dp1, std::string dp2) { std::string temp; for (int i = 0; i < dp1.size(); ++i) { if ( dp1[i] != '"') temp.push_back(dp1[i]); } return temp.compare(dp2); } /** * Get an alert by specific key * * @param key Key to get alert * @return string Alert */ std::string ManagementClient::getAlertByKey(const std::string& key) { std::string response = "Status: 404 Not found"; try { std::string url = "/fledge/alert/" + urlEncode(key) ; auto res = this->getHttpClient()->request("GET", url.c_str()); std::string statusCode = res->status_code; if (statusCode.compare("200 OK")) { m_logger->error("Get alert failed %s.", statusCode.c_str()); response = "Status: " + statusCode; return response; } response = res->content.string(); } catch (const SimpleWeb::system_error &e) { m_logger->error("Get alert failed %s.", e.what()); } return response; } /** * Raise an alert * * @param key Alert key * @param message Alert message * @param urgency Alert urgency * @return whether operation was successful */ bool ManagementClient::raiseAlert(const std::string& key, const std::string& message, const std::string& urgency) { try { std::string url = "/fledge/alert" ; ostringstream payload; payload << "{\"key\":\"" << key << "\"," << "\"message\":\"" << message << "\"," << "\"urgency\":\"" << urgency << "\"}"; auto res = this->getHttpClient()->request("POST", url.c_str(), payload.str()); std::string statusCode = res->status_code; if (statusCode.compare("200 OK")) { m_logger->error("Raise alert failed %s.", statusCode.c_str()); return false; } return true; } catch (const SimpleWeb::system_error &e) 
{ m_logger->error("Raise alert failed %s.", e.what()); return false; } } /** * Clear an alert * * @param key Alert key * @return whether operation was successful */ bool ManagementClient::clearAlert(const std::string& key) { try { std::string url = "/fledge/alert/" + urlEncode(key); auto res = this->getHttpClient()->request("DELETE", url.c_str()); std::string statusCode = res->status_code; if (statusCode.compare("200 OK")) { m_logger->error("Clear alert failed %s.", statusCode.c_str()); return false; } return true; } catch (const SimpleWeb::system_error &e) { m_logger->error("Clear alert failed %s.", e.what()); return false; } } ================================================ FILE: C/common/pipeline_branch.cpp ================================================ /* * Fledge pipeline branch class * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <pipeline_element.h> #include <filter_pipeline.h> #include <config_handler.h> #include <service_handler.h> #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" using namespace std; /** * Constructor for a branch in a filter pipeline */ PipelineBranch::PipelineBranch(FilterPipeline *parent) : m_pipeline(parent), m_thread(NULL), PipelineElement() { m_shutdownCalled = false; } /** * Destructor for the pipeline branch * * If the pipeline is not already shutdown then shut it down * Delete the thread if it exists. 
*/ PipelineBranch::~PipelineBranch() { if (!m_shutdownCalled) { m_shutdownCalled = true; m_cv.notify_all(); if (m_thread->joinable()) m_thread->join(); } if (m_thread) { delete m_thread; } // Clear any queued readings while (!m_queue.empty()) { ReadingSet *readings = m_queue.front(); m_queue.pop(); delete readings; } for (auto it = m_branch.begin(); it != m_branch.end(); ++it) { delete *it; } } /** * Setup the configuration for a branch in a pipeline * * @param mgtClient The management client * @param children A vector to fill with child configuration categories */ bool PipelineBranch::setupConfiguration(ManagementClient *mgtClient, vector<string>& children) { for (auto it = m_branch.begin(); it != m_branch.end(); ++it) { (*it)->setupConfiguration(mgtClient, children); } return true; } /** * Setup the configuration categories for the branch element of * a pipeline. The branch itself has no category, but it must call * the setup method on all items in the child branch of the * piepline. * * @param mgmt The management client * @param ingest The configuration handler for our service * @param filterCategories A map of the category names to pipeline elements */ bool PipelineBranch::setup(ManagementClient *mgmt, void *ingest, map<string, PipelineElement *>& filterCategories) { vector<string> children; for (auto it = m_branch.begin(); it != m_branch.end(); ++it) { if ((*it)->isBranch()) { PipelineBranch *branch = (PipelineBranch *)(*it); branch->setFunctions(m_passOnward, m_useData, m_ingest); } (*it)->setup(mgmt, ingest, filterCategories); } return true; } /** * Initialise the pipeline branch. * * Initialise the elements of the child pipeline * Spawn a thread to excute the child pipeline. 
* * @param config The filter configuration * @param outHandle The pipeline element on the "main branch" * @param output */ bool PipelineBranch::init(OUTPUT_HANDLE* outHandle, OUTPUT_STREAM output) { bool initErrors = false; string errMsg = "'plugin_init' failed for filter '"; for (auto it = m_branch.begin(); it != m_branch.end(); ++it) { try { Logger::getLogger()->info("Initialise %s on pipeline branch", (*it)->getName().c_str()); // Iterate the load filters set in the Ingest class m_filters member if ((it + 1) != m_branch.end()) { (*it)->setNext(*(it + 1)); // Set next filter pointer as OUTPUT_HANDLE if (!(*it)->init((OUTPUT_HANDLE *)(*(it + 1)), filterReadingSetFn(m_passOnward))) { errMsg += (*it)->getName() + "'"; initErrors = true; break; } } else { // Set the Ingest class pointer as OUTPUT_HANDLE if (!(*it)->init((OUTPUT_HANDLE *)(m_ingest), filterReadingSetFn(m_useData))) { errMsg += (*it)->getName() + "'"; initErrors = true; break; } } } // TODO catch specific exceptions catch (...) { throw; } } if (initErrors) { // Failure Logger::getLogger()->fatal("%s error: %s", __FUNCTION__, errMsg.c_str()); return false; } Logger::getLogger()->debug("Create branch handler thread"); m_thread = new thread(PipelineBranch::branchHandler, this); //Success return true; } /** * Ingest a set of readings and pass on in the pipeline. Create a deep copy * and queue the copy into the branched pipeline. 
* * @param readingSet The set of readings to ingest */ void PipelineBranch::ingest(READINGSET *readingSet) { if (m_debugger) { PipelineDebugger::DebuggerActions action = m_debugger->process(readingSet); switch (action) { case PipelineDebugger::Block: delete readingSet; return; case PipelineDebugger::NoAction: break; } } m_pipeline->startBranch(); READINGSET *copy = new ReadingSet(); copy->copy(*readingSet); unique_lock<mutex> lck(m_mutex); m_queue.push(copy); lck.unlock(); m_cv.notify_one(); if (m_next) { m_next->ingest(readingSet); } else { // Pipeline branch has no downstream element, write direct to storage (*(OUTPUT_STREAM)m_useData)(m_ingest, readingSet); } } /** * Setup the configuration categories for the branch element of * a pipeline. The branch itself has no category, but it must call * the setup method on all items in the child branch of the * piepline. * * @param mgmt The management client * @param ingest The configuration handler for our service * @param filterCategories A map of the category names to pipeline elements */ void PipelineBranch::shutdown(ServiceHandler *serviceHandler, ConfigHandler *configHandler) { // Shutdown the handler thread m_shutdownCalled = true; m_cv.notify_all(); m_thread->join(); delete m_thread; m_thread = NULL; // Shutdown the filter elements on the branch for (auto it = m_branch.begin(); it != m_branch.end(); ++it) { (*it)->shutdown(serviceHandler, configHandler); } // Clear any queued readings while (!m_queue.empty()) { ReadingSet *readings = m_queue.front(); m_queue.pop(); delete readings; } } /** * Return if the branch is ready to be executed */ bool PipelineBranch::isReady() { return true; } /** * Static entry point for the thread that handles sending data on the * branch * * @param instance The instance of the PipelineBranch */ void PipelineBranch::branchHandler(void *instance) { PipelineBranch *branch = (PipelineBranch *)instance; branch->handler(); } /** * The handler for readings in an instance of a branch. 
* Loop waiting for data or a shutdown signal and pass the * queued data to the first filter in the pipeline branch */ void PipelineBranch::handler() { Logger::getLogger()->info("Starting thread to process branch pipeline"); while (!m_shutdownCalled) { unique_lock<mutex> lck(m_mutex); while (m_queue.empty()) { m_cv.wait(lck); if (m_shutdownCalled) { return; } } ReadingSet *readings = m_queue.front(); m_queue.pop(); lck.unlock(); m_branch[0]->ingest(readings); m_pipeline->completeBranch(); } } ================================================ FILE: C/common/pipeline_debugger.cpp ================================================ /* * Fledge pipeline debugger class * * Copyright (c) 2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <pipeline_debugger.h> using namespace std; /** * Constructor for the pipeline element debugger */ PipelineDebugger::PipelineDebugger() : m_buffer(NULL) { } /** * Destructor for the pipeline element debugger */ PipelineDebugger::~PipelineDebugger() { if (m_buffer) delete m_buffer; } /** * Process a reading set as it flows through the pipeline. * The main purpose here is to buffer the readings in the circular * buffer in order to allow later examination of the data. 
* * @param readings The reading set flowing into the pipeline element * @return DebuggerActions Action signal to the pipeline */ PipelineDebugger::DebuggerActions PipelineDebugger::process(ReadingSet *readings) { lock_guard<mutex> guard(m_bufferMutex); if (!m_buffer) return NoAction; m_buffer->insert(readings->getAllReadings()); return NoAction; } /** * Set the size of the circular buffer used to buffer * the data flowing in the pipeline * * @param size The number of readings to buffer */ void PipelineDebugger::setBuffer(unsigned int size) { lock_guard<mutex> guard(m_bufferMutex); if (m_buffer) { delete m_buffer; } m_buffer = new ReadingCircularBuffer(size); } /** * Remove the circular buffer of readings and stop the * process of storing future readings */ void PipelineDebugger::clearBuffer() { lock_guard<mutex> guard(m_bufferMutex); if (m_buffer) { delete m_buffer; m_buffer = NULL; } } /** * Fetch the current contents of the circular buffer. A vector * of shared pointers is returned to alleviate the need to * copy the readings. 
* * @return vector<shared_ptr<Reading> The readings that are returned */ std::vector<std::shared_ptr<Reading>> PipelineDebugger::fetchBuffer() { vector<std::shared_ptr<Reading>> vec; lock_guard<mutex> guard(m_bufferMutex); if (m_buffer) { int extracted = m_buffer->extract(vec); Logger::getLogger()->debug("Debugger return %d readings", extracted); } return vec; } ================================================ FILE: C/common/pipeline_element.cpp ================================================ /* * Fledge pipeline element classes * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <pipeline_element.h> #include <filter_pipeline.h> #include <config_handler.h> #include <service_handler.h> #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" using namespace std; /** * Attach a debugger class to the pipeline * * @return bool True if a debugger is attached to the element */ bool PipelineElement::attachDebugger() { if (!m_debugger) m_debugger = new PipelineDebugger(); return m_debugger ? 
true : false; } /** * Detach a pipeline debugger from the pipeline element */ void PipelineElement::detachDebugger() { if (m_debugger) delete m_debugger; m_debugger = NULL; } /** * Setup the size of the debug buffer * * @param size Number of readings to buffer */ void PipelineElement::setDebuggerBuffer(unsigned int size) { if (m_debugger) { if (size) m_debugger->setBuffer(size); else m_debugger->clearBuffer(); } } /** * Fetch the content of the debugger buffer * * @return vector<shared_ptr<ReadingSet>> The current contents of the debugger buffer */ vector<shared_ptr<Reading>> PipelineElement::getDebuggerBuffer() { if (m_debugger) { return m_debugger->fetchBuffer(); } vector<shared_ptr<Reading>> empty; return empty; } ================================================ FILE: C/common/pipeline_filter.cpp ================================================ /* * Fledge pipeline filter class * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <pipeline_element.h> #include <filter_pipeline.h> #include <config_handler.h> #include <service_handler.h> #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" using namespace std; /** * Construct the PipelineFilter class. This is the * specialisation of the PipelineElement that represents * a running filter in the pipeline. 
*/ PipelineFilter::PipelineFilter(const string& name, const ConfigCategory& filterDetails) : PipelineElement(), m_name(name), m_plugin(NULL) { m_name = name; if (!filterDetails.itemExists("plugin")) { string errMsg("loadFilters: 'plugin' item not found "); errMsg += "in " + m_name + " category"; Logger::getLogger()->fatal(errMsg.c_str()); throw runtime_error(errMsg); } m_pluginName = filterDetails.getValue("plugin"); // Load filter plugin only: we don't call any plugin method right now m_handle = loadFilterPlugin(m_pluginName); if (!m_handle) { string errMsg("Cannot load filter plugin '" + m_pluginName + "'"); Logger::getLogger()->fatal(errMsg.c_str()); throw runtime_error(errMsg); } } /** * Destructor for the pipeline filter element */ PipelineFilter::~PipelineFilter() { delete m_plugin; } /** * Setup the configuration for a filter in a pipeline * * @param mgtClient The managament client * @param children A vector to fill with child configuration categories */ bool PipelineFilter::setupConfiguration(ManagementClient *mgtClient, vector<string>& children) { PluginManager *manager = PluginManager::getInstance(); string filterConfig = manager->getInfo(m_handle)->config; m_categoryName = m_serviceName + "_" + m_name; // Create/Update default filter category items DefaultConfigCategory filterDefConfig(m_categoryName, filterConfig); string filterDescription = "Configuration of '" + m_name; filterDescription += "' filter for plugin '" + m_pluginName + "'"; filterDefConfig.setDescription(filterDescription); if (!mgtClient->addCategory(filterDefConfig, true)) { string errMsg("Cannot create/update '" + \ m_categoryName + "' filter category"); Logger::getLogger()->fatal(errMsg.c_str()); return false; } children.push_back(m_serviceName + '_' + m_name); // Instantiate the FilterPlugin class // in order to call plugin entry points m_plugin = new FilterPlugin(m_name, m_handle); if (!m_plugin) return false; return true; } /** * Load the specified filter plugin * * @param 
filterName The filter plugin to load * @return Plugin handle on success, NULL otherwise * */ PLUGIN_HANDLE PipelineFilter::loadFilterPlugin(const string& filterName) { if (filterName.empty()) { Logger::getLogger()->error("Unable to fetch filter plugin '%s' from configuration.", filterName.c_str()); // Failure return NULL; } Logger::getLogger()->info("Loading filter plugin '%s'.", filterName.c_str()); PluginManager *manager = PluginManager::getInstance(); PLUGIN_HANDLE handle; if ((handle = manager->loadPlugin(filterName, PLUGIN_TYPE_FILTER)) != NULL) { // Success Logger::getLogger()->info("Loaded filter plugin '%s'.", filterName.c_str()); } return handle; } /** * Setup the configuration categories for the filter * element in a pipeline * * @param mgmt The Management client * @param ingest The service handler for our service * @param filterCatiegories A map of the category name to pipeline element */ bool PipelineFilter::setup(ManagementClient *mgmt, void *ingest, map<string, PipelineElement *>& filterCategories) { vector<string> children; Logger::getLogger()->info("Load plugin categoryName %s for %s", m_categoryName.c_str(), m_name.c_str()); // Fetch up to date filter configuration try { m_updatedCfg = mgmt->getCategory(m_categoryName); // Pass Management client IP:Port to filter so that it may connect to bucket service m_updatedCfg.addItem("mgmt_client_url_base", "Management client host and port", "string", "127.0.0.1:0", mgmt->getUrlbase()); // Add filter category name under service/process config name children.push_back(m_categoryName); mgmt->addChildCategories(m_serviceName, children); } catch (...) 
/**
 * Initialise the pipeline filter ready for ingest of data.
 *
 * Calls the filter plugin's init entry point with the up-to-date
 * configuration fetched in setup(). If the plugin requested data
 * persistence, previously persisted plugin data is loaded from the
 * storage layer and handed to the plugin via startData().
 *
 * @param outHandle	The pipeline element we are sending the data to
 * @param output	The function to call to pass data downstream
 * @return bool		Always returns true
 */
bool PipelineFilter::init(OUTPUT_HANDLE* outHandle, OUTPUT_STREAM output)
{
	m_plugin->init(m_updatedCfg, outHandle, output);
	if (m_plugin->persistData())
	{
		// Plugin support SP_PERSIST_DATA
		// Instantiate the PluginData class
		m_plugin->m_plugin_data = new PluginData(m_storage);
		// Load plugin data from storage layer.
		// The lookup key is serviceName + filterName + pluginName.
		// NOTE(review): shutdown() builds its persistence key from
		// m_plugin->getName() rather than m_name - confirm the two
		// always match or the saved data will never be reloaded.
		string pluginStoredData = m_plugin->m_plugin_data->loadStoredData(m_serviceName + m_name + m_pluginName);
		//call 'plugin_start' with plugin data: startData()
		m_plugin->startData(pluginStoredData);
	}
	return true;
}
string saveData = m_plugin->shutdownSaveData(); // 2- store returned data: key is service/task categoryName + filter category name + pluginName string key(m_serviceName + m_plugin->getName() + m_pluginName.c_str()); if (!m_plugin->m_plugin_data->persistPluginData(key, saveData, m_serviceName)) { Logger::getLogger()->error("Filter %s has failed to save data [%s] for key %s and name %s", m_plugin->getName().c_str(), saveData.c_str(), key.c_str(), m_serviceName.c_str()); } } else { // Call filter plugin shutdown m_plugin->shutdown(); } } /** * Reconfigure method */ void PipelineFilter::reconfigure(const string& newConfig) { m_plugin->reconfigure(newConfig); } ================================================ FILE: C/common/pipeline_writer.cpp ================================================ /* * Fledge pipeline writer class * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <pipeline_element.h> #include <filter_pipeline.h> #include <config_handler.h> #include <service_handler.h> #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" using namespace std; /** * Constructor for the pipeline writer, the element that sits * at the end of every pipeline and branch */ PipelineWriter::PipelineWriter() { } /** * Ingest into a pipeline writer */ void PipelineWriter::ingest(READINGSET *readingSet) { if (m_debugger) { PipelineDebugger::DebuggerActions action = m_debugger->process(readingSet); switch (action) { case PipelineDebugger::Block: delete readingSet; return; case PipelineDebugger::NoAction: break; } } (*m_useData)(m_ingest, readingSet); } /** * Setup the pipeline writer */ bool PipelineWriter::setup(ManagementClient *mgmt, void *ingest, std::map<std::string, PipelineElement*>& categories) { return true; } /** * Initialise the pipeline writer */ bool PipelineWriter::init(OUTPUT_HANDLE* outHandle, OUTPUT_STREAM output) { m_useData = output; m_ingest = outHandle; return true; } /** * Shutdown the 
/**
 * PluginData constructor
 *
 * @param client	StorageClient pointer used to access the
 *			plugin_data table (presumably owned by the
 *			caller - this class never visibly frees it)
 */
PluginData::PluginData(StorageClient* client) :
	m_storage(client),
	m_dataLoaded(false)	// Becomes true once data for a key has been loaded or inserted
{
}
/**
 * Store plugin data for a given key.
 *
 * If data was previously loaded (m_dataLoaded) an UPDATE is attempted
 * first, falling back to an INSERT; otherwise an INSERT is attempted
 * first, falling back to an UPDATE. This handles both a fresh key and
 * a key already present in the table.
 *
 * @param key		The given key
 * @param data		The JSON data to save (as string)
 * @param service_name	The name of service
 * @return		true on success, false otherwise.
 */
bool PluginData::persistPluginData(const string& key, const string& data, const string& service_name)
{
	// Validate the payload is parseable JSON before touching storage
	Document JSONData;
	JSONData.Parse(data.c_str());
	if (JSONData.HasParseError())
	{
		Logger::getLogger()->warn("Failed to persist data for key: %s and service name: %s, parse error in JSON data", key.c_str(), service_name.c_str());
		return false;
	}
	bool ret = true;
	// Prepare WHERE key =
	const Condition conditionUpdate(Equals);
	Where wKey("key", conditionUpdate, key);
	// Values common to the update path
	// NOTE(review): JSONData is passed to several InsertValue
	// instances; this assumes InsertValue copies the document rather
	// than taking ownership - confirm against the InsertValue API
	InsertValues updateData;
	updateData.push_back(InsertValue("data", JSONData));
	updateData.push_back(InsertValue("service_name", service_name));
	if (m_dataLoaded)
	{
		// Try update first
		if (m_storage->updateTable("plugin_data", updateData, wKey) == -1)
		{
			// Update failure: try insert
			InsertValues insertData;
			insertData.push_back(InsertValue("key", key));
			insertData.push_back(InsertValue("data", JSONData));
			insertData.push_back(InsertValue("service_name", service_name));
			if (m_storage->insertTable("plugin_data", insertData) == -1)
			{
				ret = false;
			}
		}
	}
	else
	{
		// We didn't load the data so do an insert first
		InsertValues insertData;
		insertData.push_back(InsertValue("key", key));
		insertData.push_back(InsertValue("data", JSONData));
		insertData.push_back(InsertValue("service_name", service_name));
		if (m_storage->insertTable("plugin_data", insertData) == -1)
		{
			// The insert failed, so try an update before giving up
			if (m_storage->updateTable("plugin_data", updateData, wKey) == -1)
			{
				ret = false;
			}
		}
		else
		{
			m_dataLoaded = true;	// Data is now in the database
		}
	}
	if (!ret)
	{
		Logger::getLogger()->warn("Failed to persist data for key: %s and service name: %s, unable to insert into storage", key.c_str(), service_name.c_str());
	}
	return ret;
}
================================================ FILE: C/common/process.cpp ================================================ /* * Fledge process class * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ /** * Fledge process base class */ #include <iostream> #include <logger.h> #include <process.h> #include <service_record.h> #include <signal.h> #include <dlfcn.h> #include <execinfo.h> #include <cxxabi.h> #define LOG_SERVICE_NAME "Fledge Process" using namespace std; /** * Signal handler to log stack traces on fatal signals */ static void handler(int sig) { Logger *logger = Logger::getLogger(); void *array[20]; char buf[1024]; int size; // get void*'s for all entries on the stack size = backtrace(array, 20); // print out all the frames to stderr logger->fatal("Signal %d (%s) trapped:\n", sig, strsignal(sig)); char **messages = backtrace_symbols(array, size); for (int i = 0; i < size; i++) { Dl_info info; if (dladdr(array[i], &info) && info.dli_sname) { char *demangled = NULL; int status = -1; if (info.dli_sname[0] == '_') demangled = abi::__cxa_demangle(info.dli_sname, NULL, 0, &status); snprintf(buf, sizeof(buf), "%-3d %*p %s + %zd---------", i, int(2 + sizeof(void*) * 2), array[i], status == 0 ? demangled : info.dli_sname == 0 ? 
messages[i] : info.dli_sname, (char *)array[i] - (char *)info.dli_saddr); free(demangled); } else { snprintf(buf, sizeof(buf), "%-3d %*p %s---------", i, int(2 + sizeof(void*) * 2), array[i], messages[i]); } logger->fatal("(%d) %s", i, buf); } free(messages); exit(1); } // Destructor FledgeProcess::~FledgeProcess() { delete m_client; delete m_storage; delete m_logger; } // Constructor FledgeProcess::FledgeProcess(int argc, char** argv) : m_stime(time(NULL)), m_argc(argc), m_arg_vals((const char**) argv), m_dryRun(false) { signal(SIGSEGV, handler); signal(SIGILL, handler); signal(SIGBUS, handler); signal(SIGFPE, handler); signal(SIGABRT, handler); string myName = LOG_SERVICE_NAME; try { m_core_mngt_host = getArgValue("--address="); m_core_mngt_port = atoi(getArgValue("--port=").c_str()); m_name = getArgValue("--name="); } catch (exception e) { throw runtime_error(string("Error while parsing required options: ") + e.what()); } // Look for the --dryrun flag for (int i = 1; i < argc; i++) { if (!strncmp(argv[i], "--dryrun", 8)) { m_dryRun = true; } } myName = m_name; m_logger = new Logger(myName); if (m_core_mngt_host.empty()) { throw runtime_error("Error: --address is not specified"); } else if (m_core_mngt_port == 0) { throw runtime_error("Error: --port is not specified"); } else if (m_name.empty()) { throw runtime_error("Error: --name is not specified"); } m_logger->setMinLevel("warning"); // Default to warnings, errors and fatal for log messages try { string minLogLevel = getArgValue("--loglevel="); if (!minLogLevel.empty()) { m_logger->setMinLevel(minLogLevel); } } catch (exception e) { throw runtime_error(string("Error while parsing optional options: ") + e.what()); } // Connection to Fledge core microservice m_client = new ManagementClient(m_core_mngt_host, m_core_mngt_port); // Create Audit Logger m_auditLogger = new AuditLogger(m_client); // Storage layer handle ServiceRecord storageInfo("Fledge Storage"); if (!m_client->getService(storageInfo)) { string 
errMsg("Unable to find storage service at "); errMsg += m_core_mngt_host; errMsg += ':'; errMsg += to_string(m_core_mngt_port); throw runtime_error(errMsg); } if (!(m_storage = new StorageClient(storageInfo.getAddress(), storageInfo.getPort()))) { string errMsg("Unable to connect to storage service at "); errMsg.append(storageInfo.getAddress()); errMsg += ':'; errMsg += to_string(storageInfo.getPort()); throw runtime_error(errMsg); } } /** * Get command line argument value like "--xyx=ABC" * Argument name to pass is "--xyz=" * * @param name The argument name (--xyz=) * @return The argument value if found or an emopty string */ string FledgeProcess::getArgValue(const string& name) const { for (int i=1; i < m_argc; i++) { if (strncmp(m_arg_vals[i], name.c_str(), name.length()) == 0) { // Return the option value (after "--xyx=ABC" return string(m_arg_vals[i] + name.length()); } } // Return empty string return string(""); } /** * Return storage client */ StorageClient* FledgeProcess::getStorageClient() const { return m_storage; } /** * Return management client */ ManagementClient* FledgeProcess::getManagementClient() const { return m_client; } /** * Return Logger */ Logger *FledgeProcess::getLogger() const { return m_logger; } ================================================ FILE: C/common/purge_result.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <purge_result.h> #include <string> #include <rapidjson/document.h> #include <sstream> using namespace std; using namespace rapidjson; /** * Construct a purge result from a JSON document returned from * the Fledge storage service. 
*/ PurgeResult::PurgeResult(const std::string& json) { Document doc; doc.Parse(json.c_str()); if (doc.HasParseError()) { throw new exception(); } if (doc.HasMember("removed")) { m_removed = doc["removed"].GetUint(); } else { m_removed = 0; } if (doc.HasMember("unsentPurged")) { m_unsentPurged = doc["unsentPurged"].GetUint(); } else { m_unsentPurged = 0; } if (doc.HasMember("unsentRetained")) { m_unsentRetained = doc["unsentRetained"].GetUint(); } else { m_unsentRetained = 0; } if (doc.HasMember("readings")) { m_remaining = doc["readings"].GetUint(); } else { m_remaining = 0; } } ================================================ FILE: C/common/pyexception.cpp ================================================ /* * Fledge Python runtime * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <logger.h> #include <pyruntime.h> #include <Python.h> #include <stdexcept> #include <stdarg.h> using namespace std; /** * Log an exception from a Python rotuine including the stack track formatted into the * error log. * * @param name The name to attached to the exception trace. */ void PythonRuntime::logException(const string& name) { PyObject* type; PyObject* value; PyObject* traceback; PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); PyObject* str_exc_value = PyObject_Repr(value); PyObject* pyExcValueStr = PyUnicode_AsEncodedString(str_exc_value, "utf-8", "Error ~"); const char* pErrorMessage = value ? 
PyBytes_AsString(pyExcValueStr) : "no error description."; Logger::getLogger()->fatal("Python Runtime: %s: Error '%s'", name.c_str(), pErrorMessage); // Check for numpy/pandas import errors const char *err1 = "implement_array_function method already has a docstring"; const char *err2 = "cannot import name 'check_array_indexer' from 'pandas.core.indexers'"; std::string fcn = ""; fcn += "def get_pretty_traceback(exc_type, exc_value, exc_tb):\n"; fcn += " import sys, traceback\n"; fcn += " lines = []\n"; fcn += " lines = traceback.format_exception(exc_type, exc_value, exc_tb)\n"; fcn += " return lines\n"; PyRun_SimpleString(fcn.c_str()); PyObject* mod = PyImport_ImportModule("__main__"); if (mod != NULL) { PyObject* method = PyObject_GetAttrString(mod, "get_pretty_traceback"); if (method != NULL) { PyObject* outList = PyObject_CallObject(method, Py_BuildValue("OOO", type, value, traceback)); if (outList != NULL) { if (PyList_Check(outList)) { Py_ssize_t listSize = PyList_Size(outList); for (Py_ssize_t i = 0; i < listSize; i++) { PyObject *tmp = PyUnicode_AsASCIIString(PyList_GetItem(outList, i)); Logger::getLogger()->fatal("%s", PyBytes_AsString(tmp)); } } else Logger::getLogger()->error("Expected a list"); } } Py_CLEAR(method); } // Reset error PyErr_Clear(); // Remove references Py_CLEAR(type); Py_CLEAR(value); Py_CLEAR(traceback); Py_CLEAR(str_exc_value); Py_CLEAR(pyExcValueStr); Py_CLEAR(mod); } ================================================ FILE: C/common/pyruntime.cpp ================================================ /* * Fledge Python runtime * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <logger.h> #include <pyruntime.h> #include <Python.h> #include <stdexcept> #include <stdarg.h> using namespace std; PythonRuntime *PythonRuntime::m_instance = 0; /** * Get PythonRuntime singleton instance for the process * * @return Singleton PythonRuntime instance */ PythonRuntime 
*PythonRuntime::getPythonRuntime() { if (!m_instance) { m_instance = new PythonRuntime; } return m_instance; } /** * Constructor */ PythonRuntime::PythonRuntime() { Py_Initialize(); PyEval_InitThreads(); PyThreadState *save = PyEval_SaveThread(); // Release the GIL } /** * Destructor */ PythonRuntime::~PythonRuntime() { PyGILState_STATE gstate = PyGILState_Ensure(); Py_Finalize(); } /** * Don't allow a copy constructor to be used */ PythonRuntime::PythonRuntime(const PythonRuntime& rhs) { throw runtime_error("Illegal attempt to copy a Python runtime"); } /** * Don't allow an assignment to make a copy */ PythonRuntime& PythonRuntime::operator=(const PythonRuntime& rhs) { throw runtime_error("Illegal attempt to copy a Python runtime via assignment"); } /** * Execute simple Python script passed as a string * * @param python The Python code to run */ void PythonRuntime::execute(const string& python) { PyGILState_STATE state = PyGILState_Ensure(); try { PyRun_SimpleString(python.c_str()); } catch (exception& e) { Logger::getLogger()->error("Exception %s executing Python '%s'", e.what(), python.c_str()); } PyGILState_Release(state); } /** * Call a Python function with a set of arguemnts * * The characters space, tab, colon and comma are ignored in format * strings (but not within format units such as s#). This can be used to * make long format strings a tad more readable. * * s (str or None) [const char *] * Convert a null-terminated C string to a Python str object using * 'utf-8' encoding. If the C string pointer is NULL, None is used. * * s# (str or None) [const char *, Py_ssize_t] * Convert a C string and its length to a Python str object using 'utf-8' * encoding. If the C string pointer is NULL, the length is ignored and * None is returned. * * y (bytes) [const char *] * This converts a C string to a Python bytes object. If the C string * pointer is NULL, None is returned. 
* * y# (bytes) [const char *, Py_ssize_t] * This converts a C string and its lengths to a Python object. If the * C string pointer is NULL, None is returned. * * z (str or None) [const char *] * Same as s. * * z# (str or None) [const char *, Py_ssize_t] * Same as s#. * * u (str) [const wchar_t *] * Convert a null-terminated wchar_t buffer of Unicode (UTF-16 or UCS-4) * data to a Python Unicode object. If the Unicode buffer pointer is NULL, * None is returned. * * u# (str) [const wchar_t *, Py_ssize_t] * Convert a Unicode (UTF-16 or UCS-4) data buffer and its length to * a Python Unicode object. If the Unicode buffer pointer is NULL, the * length is ignored and None is returned. * * U (str or None) [const char *] * Same as s. * * U# (str or None) [const char *, Py_ssize_t] * Same as s#. * * i (int) [int] * Convert a plain C int to a Python integer object. * * b (int) [char] * Convert a plain C char to a Python integer object. * * h (int) [short int] * Convert a plain C short int to a Python integer object. * * l (int) [long int] * Convert a C long int to a Python integer object. * * B (int) [unsigned char] * Convert a C unsigned char to a Python integer object. * * H (int) [unsigned short int] * Convert a C unsigned short int to a Python integer object. * * I (int) [unsigned int] * Convert a C unsigned int to a Python integer object. * * k (int) [unsigned long] * Convert a C unsigned long to a Python integer object. * * L (int) [long long] * Convert a C long long to a Python integer object. * * K (int) [unsigned long long] * Convert a C unsigned long long to a Python integer object. * * n (int) [Py_ssize_t] * Convert a C Py_ssize_t to a Python integer. * * c (bytes of length 1) [char] * Convert a C int representing a byte to a Python bytes object of length 1. * * C (str of length 1) [int] * Convert a C int representing a character to Python str object of length 1. * * d (float) [double] * Convert a C double to a Python floating point number. 
* * f (float) [float] * Convert a C float to a Python floating point number. * * D (complex) [Py_complex *] * Convert a C Py_complex structure to a Python complex number. * * O (object) [PyObject *] * Pass a Python object untouched (except for its reference count, which * is incremented by one). If the object passed in is a NULL pointer, it * is assumed that this was caused because the call producing the argument * found an error and set an exception. Therefore, Py_BuildValue() will * return NULL but won’t raise an exception. If no exception has been * raised yet, SystemError is set. * * S (object) [PyObject *] * Same as O. * * N (object) [PyObject *] /bin/bash: ft: command not found * * O& (object) [converter, anything] * Convert anything to a Python object through a converter function. The * function is called with anything (which should be compatible with void*) * as its argument and should return a “new” Python object, or NULL * if an error occurred. * * (items) (tuple) [matching-items] * Convert a sequence of C values to a Python tuple with the same number of items. * * [items] (list) [matching-items] * Convert a sequence of C values to a Python list with the same number of items. * * {items} (dict) [matching-items] * Convert a sequence of C values to a Python dictionary. Each pair of * consecutive C values adds one item to the dictionary, serving as key * and value, respectively. * * * @param fcn The name of the function to call * @param fmt The buildValue style format string for the arguments * @return PyObject* The function result */ PyObject *PythonRuntime::call(const string& fcn, const string& fmt, ...) 
{ PyObject *rval = NULL; va_list ap; PyObject *mod, *method; PyGILState_STATE state = PyGILState_Ensure(); if ((mod = PyImport_ImportModule("__main__")) != NULL) { if ((method = PyObject_GetAttrString(mod, fcn.c_str())) != NULL) { va_start(ap, fmt); PyObject *args = Py_VaBuildValue(fmt.c_str(), ap); va_end(ap); rval = PyObject_Call(method, args, NULL); if (rval == NULL) { if (PyErr_Occurred()) { logException(fcn); PyErr_Print(); } } Py_CLEAR(method); } else { Logger::getLogger()->fatal("Method '%s' not found", fcn.c_str()); } // Remove references Py_CLEAR(mod); } else { Logger::getLogger()->fatal("Failed to import module"); } // Reset error PyErr_Clear(); PyGILState_Release(state); return rval; } /** * Call a Python function within a specified module. * * The using the same formattign rules as the call method above * * @param module The module in which the function was imported * @param fcn The name of the function to call * @param fmt The buildValue style format string for the arguments * @return PyObject* The function result */ PyObject *PythonRuntime::call(PyObject *module, const string& fcn, const string& fmt, ...) 
{ PyObject *rval; va_list ap; PyObject *method; PyGILState_STATE state = PyGILState_Ensure(); if ((method = PyObject_GetAttrString(module, fcn.c_str())) != NULL) { va_start(ap, fmt); PyObject *args = Py_VaBuildValue(fmt.c_str(), ap); va_end(ap); rval = PyObject_Call(method, args, NULL); if (rval == NULL) { if (PyErr_Occurred()) { logException(fcn); PyErr_Print(); } } Py_CLEAR(method); } else { Logger::getLogger()->fatal("Method '%s' not found", fcn.c_str()); } // Reset error PyErr_Clear(); PyGILState_Release(state); return rval; } /** * Import a Python module * * @param name The name of the module to import * @return PyObject* The Python module */ PyObject *PythonRuntime::importModule(const string& name) { PyGILState_STATE state = PyGILState_Ensure(); PyObject *module = PyImport_ImportModule(name.c_str()); if (!module) { Logger::getLogger()->error("Failed to import Python module %s", name.c_str()); if (PyErr_Occurred()) { logException(name); } } PyGILState_Release(state); return module; } /** * Shutdown an instance of a Python runtime if one * has been started */ void PythonRuntime::shutdown() { if (!m_instance) { return; } delete m_instance; m_instance = NULL; } ================================================ FILE: C/common/pythonconfigcategory.cpp ================================================ /* * Fledge Python Config Category * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <pythonconfigcategory.h> #include <logger.h> #include <stdexcept> using namespace std; /** * Construct a PythonConfigCategory from a DICT object returned by Python code. * * The PythonConfigCategory acts as a wrapper on the ConfigCategory class to convert to and * from configuration categories in C and Python. 
/**
 * Construct a PythonConfigCategory from a DICT object returned by Python code.
 *
 * The PythonConfigCategory acts as a wrapper on the ConfigCategory class
 * to convert to and from configuration categories in C and Python.
 *
 * Each item in the dict must itself be a dict containing at least the
 * "description", "type" and "default" keys; items of type "enumeration"
 * must also supply an "options" list.
 *
 * @param config	The Python DICT representing the category
 * @throws runtime_error	If the argument is not a dict or any item
 *				is malformed
 */
PythonConfigCategory::PythonConfigCategory(PyObject *config)
{
	if (!PyDict_Check(config))
	{
		throw runtime_error("Invalid configuration category, expected Python DICT");
	}
	// Fetch all items in configuration dict
	PyObject *dKey, *dValue;
	Py_ssize_t dPos = 0;
	// dKey and dValue are borrowed references
	// NOTE(review): PyUnicode_AsUTF8 returns NULL for non-string
	// objects and constructing std::string from NULL is undefined
	// behaviour - confirm callers only ever pass string keys/values
	while (PyDict_Next(config, &dPos, &dKey, &dValue))
	{
		string name = PyUnicode_AsUTF8(dKey);
		string description, type, def, value;
		// Every item must itself be a dict of properties
		if (!PyDict_Check(dValue))
		{
			Logger::getLogger()->error("Configuration item %s is not an object", name.c_str());
			throw runtime_error("Malformed configuration item");
		}
		// Mandatory "description" property
		PyObject *obj = PyDict_GetItemString(dValue, "description");
		if (obj)
		{
			description = PyUnicode_AsUTF8(obj);
		}
		else
		{
			Logger::getLogger()->error("Configuration item %s is missing a description", name.c_str());
			throw runtime_error("Malformed configuration item, missing description");
		}
		// Mandatory "type" property
		obj = PyDict_GetItemString(dValue, "type");
		if (obj)
		{
			type = PyUnicode_AsUTF8(obj);
		}
		else
		{
			Logger::getLogger()->error("Configuration item %s is missing a type", name.c_str());
			throw runtime_error("Malformed configuration item, missing type");
		}
		// Mandatory "default" property
		obj = PyDict_GetItemString(dValue, "default");
		if (obj)
		{
			def = PyUnicode_AsUTF8(obj);
		}
		else
		{
			Logger::getLogger()->error("Configuration item %s is missing a default value", name.c_str());
			throw runtime_error("Malformed configuration item, missing default value");
		}
		if (type.compare("enumeration") == 0)
		{
			// Enumerations additionally require an "options" list
			vector<string> options;
			obj = PyDict_GetItemString(dValue, "options");
			if (obj && PyList_Check(obj))
			{
				Py_ssize_t listSize = PyList_Size(obj);
				for (Py_ssize_t i = 0; i < listSize; i++)
				{
					PyObject *str = PyList_GetItem(obj, i);
					string s = PyUnicode_AsUTF8(str);
					options.push_back(s);
				}
				addItem(name, description, def, value, options);
			}
			else
			{
				Logger::getLogger()->error("Configuration item %s is missing an options list", name.c_str());
				throw runtime_error("Malformed configuration item, missing options");
			}
		}
		else
		{
			addItem(name, description, type, def, value);
		}
	}
}
PyUnicode_FromString("description"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } if (item->m_order.length()) { value = PyUnicode_FromString(item->m_order.c_str()); key = PyUnicode_FromString("order"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } if (item->m_readonly.length()) { value = PyUnicode_FromString(item->m_readonly.c_str()); key = PyUnicode_FromString("readonly"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } if (item->m_mandatory.length()) { value = PyUnicode_FromString(item->m_mandatory.c_str()); key = PyUnicode_FromString("mandatory"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } if (item->m_deprecated.length()) { value = PyUnicode_FromString(item->m_deprecated.c_str()); key = PyUnicode_FromString("deprecated"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } if (item->m_length.length()) { value = PyUnicode_FromString(item->m_length.c_str()); key = PyUnicode_FromString("length"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } if (item->m_minimum.length()) { value = PyUnicode_FromString(item->m_minimum.c_str()); key = PyUnicode_FromString("minimum"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } if (item->m_maximum.length()) { value = PyUnicode_FromString(item->m_maximum.c_str()); key = PyUnicode_FromString("maximum"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } if (item->m_filename.length()) { value = PyUnicode_FromString(item->m_filename.c_str()); key = PyUnicode_FromString("filename"); PyDict_SetItem(pyItem, key, value); Py_CLEAR(key); Py_CLEAR(value); } return pyItem; } ================================================ FILE: C/common/pythonreading.cpp ================================================ /* * Fledge Python Reading * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch * * Extreme caution needs to 
be taken with these Python interfaces * classes, especially with the use of numpy which is not written * to support multiple imports of the package due to the use * of global variables within numpy itself. Hence we import numpy * once by use of the import_array() macro. This macro also has * issues a it contians an embedded return statement. */ #include <pythonreading.h> #include <pyruntime.h> #include <stdexcept> #define PY_ARRAY_UNIQUE_SYMBOL PyArray_API_FLEDGE #include <numpy/npy_common.h> #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include <numpy/ndarraytypes.h> #include <numpy/ndarrayobject.h> #undef NUMPY_IMPORT_ARRAY_RETVAL #define NUMPY_IMPORT_ARRAY_RETVAL 0 bool PythonReading::doneNumPyImport = false; using namespace std; /** * Construct a PythonReading from a DICT object returned by Python code. * * The PythonReading acts as a wrapper on the Reading class to convert to and * from Readings in C and Python. * * @param pyReading The Python DICT */ PythonReading::PythonReading(PyObject *pyReading) { // Get 'asset_code' value: borrowed reference. PyObject *assetCode = PyDict_GetItemString(pyReading, "asset"); if (!assetCode) { assetCode = PyDict_GetItemString(pyReading, "asset_code"); } // Get 'reading' value: borrowed reference. 
PyObject *reading = PyDict_GetItemString(pyReading, "readings"); if (!reading) { reading = PyDict_GetItemString(pyReading, "reading"); } // Keys not found or reading is not a dict if (!assetCode || !reading || !PyDict_Check(reading)) { // Failure if (PyErr_Occurred()) { throw runtime_error(errorMessage()); } if (!assetCode) throw runtime_error("Reading has no asset code element."); if (!reading) throw runtime_error("Reading is missing the reading element which shuld contain the data."); else throw runtime_error("The reading element in the python Reading is of an incorrect type, it should be a Python DICT."); } if (PyUnicode_Check(assetCode)) { m_asset = PyUnicode_AsUTF8(assetCode); } else if (PyBytes_Check(assetCode)) { m_asset = PyBytes_AsString(assetCode); } else { throw runtime_error("Unable to parse the asset code value. Asset codes should be a string"); } // Fetch all Datapoints in 'reading' dict PyObject *dKey, *dValue; Py_ssize_t dPos = 0; // Fetch all Datapoints in 'readings' dict // dKey and dValue are borrowed references while (PyDict_Next(reading, &dPos, &dKey, &dValue)) { DatapointValue *dataPoint = getDatapointValue(dValue); if (dataPoint) { // Deteck Python keys like reading[b'ema'] // or reading['ema'] if (PyUnicode_Check(dKey)) { m_values.emplace_back(new Datapoint( string(PyUnicode_AsUTF8(dKey)), *dataPoint)); } else { m_values.emplace_back(new Datapoint( string(PyBytes_AsString(dKey)), *dataPoint)); } // Remove temp objects delete dataPoint; } } /** *Set id, uuid, ts and user_ts of the original data */ // Get 'id' value: borrowed reference. PyObject *id = PyDict_GetItemString(pyReading, "id"); if (id && PyLong_Check(id)) { // Set id m_id = PyLong_AsUnsignedLong(id); m_has_id = true; } else { m_has_id = false; m_id = 0; } // New reference, to delete PyObject *key = PyUnicode_FromString("timestamp"); // Get 'ts' value: borrowed reference. 
// Need to use PyDict_GetItemWithError in order to avoid an exception PyObject *ts = PyDict_GetItemWithError(pyReading, key); if (!(ts && PyUnicode_Check(ts))) { ts = PyDict_GetItemString(pyReading, "ts"); } if (ts && PyUnicode_Check(ts)) { // Set timestamp const char *ts_str = PyUnicode_AsUTF8(ts); setTimestamp(ts_str); } else { m_timestamp.tv_sec = 0; m_timestamp.tv_usec = 0; // Logger::getLogger()->debug("PythonReading c'tor: Couldn't parse 'ts' "); } Py_CLEAR(key); // New reference, to delete key = PyUnicode_FromString("user_ts"); // Get 'user_ts' value: borrowed reference. PyObject *uts = PyDict_GetItemWithError(reading, key); if (!uts) { uts = PyDict_GetItemWithError(pyReading, key); } if (uts && PyUnicode_Check(uts)) { // Set user timestamp const char *ts_str = PyUnicode_AsUTF8(uts); setUserTimestamp(ts_str); } else { //Logger::getLogger()->debug("PythonReading c'tor: Couldn't parse 'user_ts' "); m_userTimestamp.tv_sec = 0; m_userTimestamp.tv_usec = 0; } Py_CLEAR(key); } /** * Given a Python value convert it into a DatapointValue * * @param value The python object to convert * @return The converted DatapointValue or NULL if the conversion was not possible */ DatapointValue *PythonReading::getDatapointValue(PyObject *value) { InitNumPy(); if (!value) { throw runtime_error("NULL datapoint value in Python reading"); } DatapointValue *dataPoint = NULL; if (PyLong_Check(value)) // Integer T_INTEGER { dataPoint = new DatapointValue((long)PyLong_AsUnsignedLongMask(value)); } else if (PyFloat_Check(value)) // Float T_FLOAT { dataPoint = new DatapointValue(PyFloat_AS_DOUBLE(value)); } else if (PyBytes_Check(value)) // String T_STRING { string str = PyBytes_AsString(value); fixQuoting(str); dataPoint = new DatapointValue(str); } else if (PyUnicode_Check(value)) // String T_STRING { string str = PyUnicode_AsUTF8(value); fixQuoting(str); dataPoint = new DatapointValue(str); } else if (PyDict_Check(value)) // Nested object T_DP_DICT { vector<Datapoint *> *values = new 
vector<Datapoint *>; Py_ssize_t dPos = 0; PyObject *dKey, *dValue; while (PyDict_Next(value, &dPos, &dKey, &dValue)) { DatapointValue *dpv = getDatapointValue(dValue); if (dpv) { if (PyUnicode_Check(dKey)) { values->emplace_back(new Datapoint(string(PyUnicode_AsUTF8(dKey)), *dpv)); } else { values->emplace_back(new Datapoint(string(PyBytes_AsString(dKey)), *dpv)); } // Remove temp objects delete dpv; } } dataPoint = new DatapointValue(values, true); } else if (PyList_Check(value)) // List of data points or floats { Py_ssize_t listSize = PyList_Size(value); // Find out what the list contains PyObject *item0 = PyList_GetItem(value, 0); if (item0 == NULL) { return NULL; } if (PyFloat_Check(item0)) // List of floats T_FLOAT_ARRAY { vector<double> values; for (Py_ssize_t i = 0; i < listSize; i++) { double d = PyFloat_AS_DOUBLE(PyList_GetItem(value, i)); values.push_back(d); } dataPoint = new DatapointValue(values); } else if (PyList_Check(item0)) // 2D array T_2D_FLOAT_ARRAY { vector<vector<double>* > values; for (Py_ssize_t i = 0; i < listSize; i++) { vector<double> *row = new vector<double>; PyObject *pyRow = PyList_GetItem(value, i); for (Py_ssize_t j = 0; j < PyList_Size(pyRow); j++) { double d = PyFloat_AS_DOUBLE(PyList_GetItem(pyRow, j)); row->push_back(d); } values.push_back(row); } dataPoint = new DatapointValue(values); for (auto& row : values) delete row; } else if (PyDict_Check(item0)) // List of datapoints T_DP_LIST { vector<Datapoint *>* values = new vector<Datapoint *>; for (Py_ssize_t i = 0; i < listSize; i++) { PyObject *item = PyList_GetItem(value, i); if (PyDict_Check(item)) { PyObject *key, *val; PyDict_Next(item, 0, &key, &val); DatapointValue *dpv = getDatapointValue(val); if (dpv) { values->emplace_back(new Datapoint(string(PyBytes_AsString(key)), *dpv)); // Remove temp objects delete dpv; } } } dataPoint = new DatapointValue(values, false); } } else if (PyArray_Check(value)) // Numpy array { PyArrayObject *array = (PyArrayObject *)value; int 
item_size = PyArray_ITEMSIZE(array); if (PyArray_NDIM(array) == 1) // Databuffer T_DATABUFFER { npy_intp *dims = PyArray_DIMS(array); int n_items = (int)dims[0]; DataBuffer *buffer = new DataBuffer(item_size, n_items); memcpy(buffer->getData(), PyArray_DATA(array), n_items * item_size); dataPoint = new DatapointValue(buffer); } else if (PyArray_NDIM(array) == 2) // Image T_IMAGE { npy_intp *dims = PyArray_DIMS(array); int height = (int)dims[0]; int width = (int)dims[1]; int depth = item_size * 8; // In bits DPImage *image = new DPImage(width, height, depth, PyArray_DATA(array)); dataPoint = new DatapointValue(image); } else if (PyArray_NDIM(array) == 3) // RGB Image T_IMAGE { npy_intp *dims = PyArray_DIMS(array); if ((int)dims[2] == 3) { int height = (int)dims[0]; int width = (int)dims[1]; int depth = 24; // In bits DPImage *image = new DPImage(width, height, depth, PyArray_DATA(array)); dataPoint = new DatapointValue(image); } else { Logger::getLogger()->error("Received 3D numpy array that is not RGB image"); } } else { Logger::getLogger()->error("Encountered a numpy array with more than 3 dimensions in a Python data point %s. This is currently not supported"); } } else { Logger::getLogger()->info("PythonReading::getDatapointValue: UNSUPPORTED"); PyTypeObject *type = value->ob_type; Logger::getLogger()->error("Encountered an unsupported type '%s' when create a reading from Python", type->tp_name); } return dataPoint; } /** * Convert a PythonReading, which is just a Reading, into a PyObject * structure that can be passed to embedded Python code. 
* * @param changeKeys Set DICT keys as reading/asset_code if true * or readings/asset if false * @param useBytesString Whether to use DICT keys as BytesString * and string values as BytesString * @return PyObject* The Python representation of the readings as a DICT */ PyObject *PythonReading::toPython(bool changeKeys, bool useBytesString) { // Create object (dict) for reading Datapoints: // this will be added as the value for key 'readings' PyObject *dataPoints = PyDict_New(); // Get all datapoints for (auto it = m_values.begin(); it != m_values.end(); ++it) { // Pass BytesString switch PyObject *value = convertDatapoint(*it, useBytesString); // Add Datapoint: key and value if (value) { PyObject *key = useBytesString ? PyBytes_FromString((*it)->getName().c_str()) : PyUnicode_FromString((*it)->getName().c_str()); PyDict_SetItem(dataPoints, key, value); Py_CLEAR(key); Py_CLEAR(value); } else { Logger::getLogger()->info("Unable to convert datapoint '%s' of reading '%s' tp Python", (*it)->getName().c_str(), m_asset.c_str()); } } // Create an object (dict) with 'asset_code' and 'readings' key PyObject *readingObject = PyDict_New(); // Add reading datapoints PyObject *key = PyUnicode_FromString(changeKeys ? "reading" : "readings"); PyDict_SetItem(readingObject, key, dataPoints); Py_CLEAR(key); // Add reading asset name PyObject *assetVal = useBytesString ? PyBytes_FromString(m_asset.c_str()) : PyUnicode_FromString(m_asset.c_str()); key = PyUnicode_FromString(changeKeys ? 
"asset_code" : "asset"); PyDict_SetItem(readingObject, key, assetVal); Py_CLEAR(key); // Add reading id PyObject *readingId = PyLong_FromUnsignedLong(m_id); key = PyUnicode_FromString("id"); PyDict_SetItem(readingObject, key, readingId); Py_CLEAR(key); // Add reading timestamp // PyObject *readingTs = PyLong_FromUnsignedLong(m_timestamp.tv_sec); string s = this->getAssetDateTime(FMT_DEFAULT) + "+00:00"; PyObject *readingTs = PyUnicode_FromString(s.c_str()); key = PyUnicode_FromString("ts"); PyDict_SetItem(readingObject, key, readingTs); Py_CLEAR(key); // Add reading user timestamp //PyObject *readingUserTs = PyLong_FromUnsignedLong(m_userTimestamp.tv_sec); s = this->getAssetDateUserTime(FMT_DEFAULT) + "+00:00"; PyObject *readingUserTs = PyUnicode_FromString(s.c_str()); key = PyUnicode_FromString("user_ts"); PyDict_SetItem(readingObject, key, readingUserTs); Py_CLEAR(key); // Remove temp objects Py_CLEAR(dataPoints); Py_CLEAR(assetVal); Py_CLEAR(readingId); Py_CLEAR(readingTs); Py_CLEAR(readingUserTs); return readingObject; } /** * Convert a single datapoint into a Pythn object * * @param dp The datapoint to convert * @param bytesString Wheter to set PyObject string as PyBytes or PyUnicode * @return The pointer to a converted Python Object or NULL if the conversion failed */ PyObject *PythonReading::convertDatapoint(Datapoint *dp, bool bytesString) { PyObject *value = NULL; DatapointValue::dataTagType dataType = dp->getData().getType(); if (dataType == DatapointValue::dataTagType::T_INTEGER) { value = PyLong_FromLong(dp->getData().toInt()); } else if (dataType == DatapointValue::dataTagType::T_FLOAT) { value = PyFloat_FromDouble(dp->getData().toDouble()); } else if (dataType == DatapointValue::dataTagType::T_STRING) { value = bytesString ? 
PyBytes_FromString(dp->getData().toStringValue().c_str()) : PyUnicode_FromString(dp->getData().toStringValue().c_str()); } else if (dataType == DatapointValue::dataTagType::T_FLOAT_ARRAY) { vector<double>* values = dp->getData().getDpArr();; int i = 0; value = PyList_New(values->size()); for (auto it = values->begin(); it != values->end(); ++it) { PyList_SetItem(value, i++, PyFloat_FromDouble(*it)); } } else if (dataType == DatapointValue::dataTagType::T_2D_FLOAT_ARRAY) { vector<vector<double>* > *vec = dp->getData().getDp2DArr(); value = PyList_New(vec->size()); int rowNo = 0; for (auto row : *vec) { int i = 0; PyObject *pyRow = PyList_New(row->size()); for (auto& d : *row) { PyList_SetItem(pyRow, i++, PyFloat_FromDouble(d)); } PyList_SetItem(value, rowNo++, pyRow); } } else if (dataType == DatapointValue::dataTagType::T_DATABUFFER) { // PythonRuntime::getPythonRuntime()->initNumPy(); InitNumPy(); DataBuffer *dbuf = dp->getData().getDataBuffer(); npy_intp dim = dbuf->getItemCount(); enum NPY_TYPES type; switch (dbuf->getItemSize()) { case 1: type = NPY_UBYTE; break; case 2: type = NPY_UINT16; break; case 4: type = NPY_UINT32; break; case 8: type = NPY_UINT64; break; default: break; } PyGILState_STATE state = PyGILState_Ensure(); value = PyArray_SimpleNewFromData(1, &dim, type, dbuf->getData()); PyGILState_Release(state); #if 0 Py_buffer *buffer = (Py_buffer *)malloc(sizeof(Py_buffer)); DataBuffer *dbuf = (*it)->getData().getDataBuffer(); buffer->buf = dbuf->getData(); buffer->itemsize = dbuf->m_itemSize; buffer->len = dbuf->len *dbuf->itemSize; #endif } else if (dataType == DatapointValue::dataTagType::T_IMAGE) { //PythonRuntime::getPythonRuntime()->initNumPy(); InitNumPy(); DPImage *image = dp->getData().getImage(); if (image->getDepth() == 24) {{ npy_intp dim[3]; dim[0] = image->getHeight(); dim[1] = image->getWidth(); dim[2] = 3; enum NPY_TYPES type = NPY_UBYTE; PyGILState_STATE state = PyGILState_Ensure(); value = PyArray_SimpleNewFromData(3, dim, type, 
image->getData()); PyGILState_Release(state); } } else { npy_intp dim[2]; dim[0] = image->getHeight(); dim[1] = image->getWidth(); enum NPY_TYPES type; switch (image->getDepth()) { case 8: type = NPY_UBYTE; break; case 16: type = NPY_UINT16; break; case 32: type = NPY_UINT32; break; case 64: type = NPY_UINT64; break; default: break; } PyGILState_STATE state = PyGILState_Ensure(); value = PyArray_SimpleNewFromData(2, dim, type, image->getData()); PyGILState_Release(state); } } else if (dataType == DatapointValue::dataTagType::T_DP_DICT) { vector<Datapoint *>* children = dp->getData().getDpVec();; value = PyDict_New(); for (auto child = children->begin(); child != children->end(); ++child) { PyObject *childValue = convertDatapoint(*child); // Add Datapoint: key and value PyObject *key = PyUnicode_FromString((*child)->getName().c_str()); PyDict_SetItem(value, key, childValue); Py_CLEAR(key); Py_CLEAR(childValue); } } else if (dataType == DatapointValue::dataTagType::T_DP_LIST) { vector<Datapoint *>* children = dp->getData().getDpVec(); int i = 0; value = PyList_New(children->size()); for (auto child = children->begin(); child != children->end(); ++child) { PyObject *childValue = convertDatapoint(*child); // TODO complete // Add Datapoint: key and value PyObject *key = PyUnicode_FromString((*child)->getName().c_str()); PyObject *dict = PyDict_New(); PyDict_SetItem(dict, key, childValue); PyList_SetItem(value, i++, dict); Py_CLEAR(key); Py_CLEAR(childValue); } } else { Logger::getLogger()->info("Unable to convert datapoint type '%s' to Python, defaulting to string representation", dp->getData().getTypeStr().c_str()); value = PyUnicode_FromString(dp->getData().toString().c_str()); } return value; } /** * Retrieve the error message last raised in Python * * @return string The Python error message */ string PythonReading::errorMessage() { //Get error message PyObject *pType, *pValue, *pTraceback; PyErr_Fetch(&pType, &pValue, &pTraceback); PyErr_NormalizeException(&pType, 
&pValue, &pTraceback); PyObject *str_exc_value = PyObject_Repr(pValue); PyObject *pyExcValueStr = PyUnicode_AsEncodedString(str_exc_value, "utf-8", "Error ~"); // NOTE from : // https://docs.python.org/2/c-api/exceptions.html // // The value and traceback object may be NULL // even when the type object is not. string errorMessage = pValue ? PyBytes_AsString(pyExcValueStr) : "no error description."; Logger::getLogger()->error("Exception from python interpreter: %s", errorMessage.c_str()); // Reset error PyErr_Clear(); // Remove references Py_CLEAR(pType); Py_CLEAR(pValue); Py_CLEAR(pTraceback); Py_CLEAR(str_exc_value); Py_CLEAR(pyExcValueStr); return errorMessage; } /** * Fix the quoting if the datapoint contains unescaped quotes * * @param str String to fix the quoting of */ void PythonReading::fixQuoting(string& str) { string newString; bool escape = false; for (int i = 0; i < str.length(); i++) { if (str[i] == '\"' && escape == false) { newString += '\\'; newString += '\\'; newString += '\\'; } else if (str[i] == '\\') { escape = !escape; } newString += str[i]; } str = newString; } /** * Return true of the Python object is an array. This is mostly for testing * and overcomes an issue with including the numpy header files multiple times. * * @param obj The Pythin object to test * @return true if the Python object is a numpy array */ bool PythonReading::isArray(PyObject *obj) { return PyArray_Check(obj); } /** * Import NumPy. Due to the way numpy uses global variables we must only do * this once in a single exeutable as multiple imports result in crashes. */ int PythonReading::InitNumPy() { if (!PythonReading::doneNumPyImport) { PythonReading::doneNumPyImport = true; // Note the following is a macro in the numpy header file that has an embedded return // in the case of failure. Hence the need to return a value. 
Assume no code after this // line is run PyGILState_STATE state = PyGILState_Ensure(); if (PyImport_ImportModule("numpy.core.multiarray") == NULL) throw runtime_error(errorMessage()); import_array(); PyGILState_Release(state); } return 0; }; ================================================ FILE: C/common/pythonreadingset.cpp ================================================ /* * Fledge Python Reading Set * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <pythonreadingset.h> #include <pythonreading.h> #include <stdexcept> using namespace std; /** * Set id, uuid, ts and user_ts in the reading object * * @param newReading Reading object to update * @param readingList PyObject containing this reading object * @param fillIfMissing If True, only fill ID/TS fields if not set already */ void PythonReadingSet::setReadingAttr(Reading* newReading, PyObject *readingList, bool fillIfMissing) { if (!newReading) return; // Get 'id' value: borrowed reference. PyObject* id = PyDict_GetItemString(readingList, "id"); bool fill = (!fillIfMissing || (fillIfMissing && newReading->getId()==0)); if (fill && id && PyLong_Check(id)) { // Set id newReading->setId(PyLong_AsUnsignedLong(id)); } // Get 'ts' value: borrowed reference. PyObject* ts = PyDict_GetItemString(readingList, "ts"); fill = (!fillIfMissing || (fillIfMissing && newReading->getTimestamp()==0)); if (fill && ts) { // Convert a timestamp of the form '2019-01-07 19:06:35.366100+01:00' const char *ts_str = PyUnicode_AsUTF8(ts); newReading->setTimestamp(ts_str); } // Get 'user_ts' value: borrowed reference. PyObject* uts = PyDict_GetItemString(readingList, "timestamp"); fill = (!fillIfMissing || (fillIfMissing && newReading->getUserTimestamp()==0)); if (fill && uts) { // Convert a timestamp of the form '2019-01-07 19:06:35.366100+01:00' const char *ts_str = PyUnicode_AsUTF8(uts); newReading->setUserTimestamp(ts_str); } // Get 'ts' value: borrowed reference. 
PyObject* userts = PyDict_GetItemString(readingList, "user_ts"); fill = (!fillIfMissing || (fillIfMissing && newReading->getUserTimestamp()==0)); if (fill && userts) { // Convert a timestamp of the form '2019-01-07 19:06:35.366100+01:00' const char *ts_str = PyUnicode_AsUTF8(userts); newReading->setUserTimestamp(ts_str); } // if User TS is still not filled, copy TS into it fill = (fillIfMissing && newReading->getUserTimestamp()==0); //Logger::getLogger()->debug("fill=%s, newReading->getUserTimestamp()=%d, newReading->getTimestamp()=%d", fill?"True":"False", newReading->getUserTimestamp(), newReading->getTimestamp()); if (fill) { struct timeval tVal; newReading->getTimestamp(&tVal); newReading->setUserTimestamp(tVal); Logger::getLogger()->debug("Copied TS into user TS: newReading->getUserTimestamp()=%d", newReading->getUserTimestamp()); } // if TS is still not filled, copy User TS into it fill = (fillIfMissing && newReading->getTimestamp()==0); //Logger::getLogger()->debug("fill=%s, newReading->getUserTimestamp()=%d, newReading->getTimestamp()=%d", fill?"True":"False", newReading->getUserTimestamp(), newReading->getTimestamp()); if (fill) { struct timeval tVal; newReading->getUserTimestamp(&tVal); newReading->setTimestamp(tVal); Logger::getLogger()->debug("Copied user TS into TS: newReading->getUserTimestamp()=%d", newReading->getUserTimestamp()); } } /** * Construct PythonReadingSet from a python list object that contains a * list of readings * * @param set A Python object pointer that contians a list of readings */ PythonReadingSet::PythonReadingSet(PyObject *set) { if (PyList_Check(set)) { Logger::getLogger()->debug("PythonReadingSet c'tor: LIST of size %d", PyList_Size(set)); } else if (PyDict_Check(set)) { Logger::getLogger()->debug("PythonReadingSet c'tor: DICT of size %d", PyDict_Size(set)); } if (PyList_Check(set)) { Py_ssize_t listSize = PyList_Size(set); for (Py_ssize_t i = 0; i < listSize; i++) { PyObject *pyReading = PyList_GetItem(set, i); PythonReading 
*reading = new PythonReading(pyReading); setReadingAttr(reading, set, true); m_readings.push_back(reading); m_count++; m_last_id = reading->getId(); } } else if (PyDict_Check(set)) { PythonReading *reading = new PythonReading(set); if (reading) { setReadingAttr(reading, set, true); m_readings.push_back(reading); m_count++; m_last_id = reading->getId(); } } else { Logger::getLogger()->error("Expected a Python list/dict as a reading set when constructing a PythonReadingSet"); throw runtime_error("Expected a Python list/dict as a reading set when constructing a PythonReadingSet"); } } /** * Convert the ReadingSet to a Python List * * @return A Python object that contains the set of readings as a Python list */ PyObject *PythonReadingSet::toPython(bool changeKeys) { PyObject *set = PyList_New(m_readings.size()); for (int i = 0; i < m_readings.size(); i++) { PythonReading *pyReading = (PythonReading *) m_readings[i]; PyList_SetItem(set, i, pyReading->toPython(changeKeys)); } return set; } ================================================ FILE: C/common/query.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2018 OSIsoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <query.h> #include <sstream> #include <iostream> using namespace std; /** * Construct a query with a simple where clause * * @param where A pointer to the where condition */ Query::Query(Where *where) : m_where(where), m_limit(0), m_timebucket(0), m_distinct(false), m_join(0) { } /** * Construct a query with a where clause and aggregate response * * @param aggregate A ppointer to the aggregate operation to perform * @param where A pointer to the where condition */ Query::Query(Aggregate *aggregate, Where *where) : m_where(where), m_limit(0), m_timebucket(0), m_distinct(false), m_join(0) { m_aggregates.push_back(aggregate); } /** * Construct a timebucket query with a simple where clause * * @param timebuck A pointer to 
the timebucket definition * @param where A pointer to the where condition */ Query::Query(Timebucket *timebucket, Where *where) : m_where(where), m_limit(0), m_timebucket(timebucket), m_distinct(false), m_join(0) { } /** * Construct a timebucket query with a simple where clause and a limit on * the rows to return * * @param timebuck A pointer to the timebucket definition * @param where A pointer to the where condition * @param limit The number of rows to return */ Query::Query(Timebucket *timebucket, Where *where, unsigned int limit) : m_where(where), m_limit(limit), m_timebucket(timebucket), m_distinct(false), m_join(0) { } /** * Construct a query with a fixed set of returned values and a simple where clause * * @params returns The set of rows to return * @params where The where clause */ Query::Query(vector<Returns *> returns, Where *where) : m_where(where), m_limit(0), m_timebucket(0), m_distinct(false), m_join(0) { for (auto it = returns.cbegin(); it != returns.cend(); ++it) { m_returns.push_back(*it); } } /** * Construct a query with a fixed set of returned values and a simple where clause * and return a limited set of rows * * @params returns The set of rows to return * @params where The where clause * @param limit The numebr of rows to return */ Query::Query(vector<Returns *> returns, Where *where, unsigned int limit) : m_where(where), m_limit(limit), m_timebucket(0), m_distinct(false), m_join(0) { for (auto it = returns.cbegin(); it != returns.cend(); ++it) { m_returns.push_back(*it); } } /** * Construct a simple query to return certain columns from a table * * @param returns The rows to return */ Query::Query(vector<Returns *> returns) : m_where(0), m_limit(0), m_timebucket(0), m_distinct(false), m_join(0) { for (auto it = returns.cbegin(); it != returns.cend(); ++it) { m_returns.push_back(*it); } } /** * Construct a simple query to return a certain column from a table * * @param returns The rows to return */ Query::Query(Returns *returns) : m_where(0), 
m_limit(0), m_timebucket(0), m_distinct(false), m_join(0) { m_returns.push_back(returns); } /** * Destructor for a query object */ Query::~Query() { if (m_where) { delete m_where; } for (auto it = m_aggregates.cbegin(); it != m_aggregates.cend(); ++it) { delete *it; } for (auto it = m_sort.cbegin(); it != m_sort.cend(); ++it) { delete *it; } for (auto it = m_returns.cbegin(); it != m_returns.cend(); ++it) { delete *it; } if (m_timebucket) { delete m_timebucket; } if (m_join) { delete m_join; } } /** * Add a aggregate operation to an existing query object * * @param aggregate The aggregate operation to add */ void Query::aggregate(Aggregate *aggregate) { m_aggregates.push_back(aggregate); } /** * Add a sort operation to an existing query * * @param sort The sort operation to add */ void Query::sort(Sort *sort) { m_sort.push_back(sort); } /** * Add a group operation to a query * * @param column The column to group by */ void Query::group(const string& column) { m_group = column; } /** * Limit the numebr of rows returned by the query * * @param limit The number of rows to limit the return to */ void Query::limit(unsigned int limit) { m_limit = limit; } /** * Add a timebucket operation to an existing query * * @param timebucket The timebucket operation to add to the query */ void Query::timebucket(Timebucket *timebucket) { m_timebucket = timebucket; } /** * Limit the query to return just a single column * * @param returns The column to return */ void Query::returns(Returns *returns) { m_returns.push_back(returns); } /** * Limit the columns returned by the query * * @param returns The columns to return */ void Query::returns(vector<Returns *> returns) { for (auto it = returns.cbegin(); it != returns.cend(); ++it) { m_returns.push_back(*it); } } /** * Add a join clause to a query * * @param join A pointer to a Join onject */ void Query::join(Join *join) { m_join = join; } /** * Add a distinct value modifier to the query */ void Query::distinct() { m_distinct = true; } 
/** * Return the JSON payload for a where clause */ const string Query::toJSON() const { ostringstream json; bool first = true; json << "{ "; if (m_where) { if (! first) json << ", "; json << "\"where\" : " << m_where->toJSON(); first = false; } if (m_join) { if (! first) json << ", "; first = false; json << m_join->toJSON(); } switch (m_aggregates.size()) { case 0: break; case 1: if (! first) json << ", "; json << "\"aggregate\" : " << m_aggregates.front()->toJSON(); first = false; break; default: if (! first) json << ", "; json << "\"aggregate\" : [ "; for (auto it = m_aggregates.cbegin(); it != m_aggregates.cend(); ++it) { if (it != m_aggregates.cbegin()) json << ", "; json << (*it)->toJSON(); } json << " ]"; first = false; break; } if (!m_group.empty()) { if (! first) json << ", "; json << "\"group\" : \"" << m_group << "\""; first = false; } switch (m_sort.size()) { case 0: break; case 1: if (! first) json << ", "; json << "\"sort\" : " << m_sort.front()->toJSON(); first = false; break; default: if (! first) json << ", "; json << "\"sort\" : [ "; for (auto it = m_sort.cbegin(); it != m_sort.cend(); ++it) { if (it != m_sort.cbegin()) json << ", "; json << (*it)->toJSON(); } json << " ], "; first = false; break; } if (m_timebucket) { if (! first) json << ", "; json << "\"timebucket\" : " << m_timebucket->toJSON(); first = false; } if (m_limit) { if (! first) json << ", "; json << "\"limit\" : " << m_limit; first = false; } if (m_returns.size()) { if (! first) json << ", "; json << "\"return\" : [ "; for (auto it = m_returns.cbegin(); it != m_returns.cend(); ++it) { if (it != m_returns.cbegin()) json << ", "; json << (*it)->toJSON(); } json << " ]"; first = false; } if (m_distinct) { if (! 
first) json << ", "; json << "\"modifier\" : \"distinct\""; first = false; } json << " }"; return json.str(); } ================================================ FILE: C/common/reading.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2018 OSIsoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <reading.h> #include <ctime> #include <string> #include <sstream> #include <iostream> #include <mutex> #include <time.h> #include <string.h> #include <logger.h> #include <rapidjson/document.h> using namespace std; using namespace rapidjson; std::vector<std::string> Reading::m_dateTypes = { DEFAULT_DATE_TIME_FORMAT, COMBINED_DATE_STANDARD_FORMAT, ISO8601_DATE_TIME_FORMAT, ISO8601_DATE_TIME_FORMAT // Version with milliseconds }; /** * Reading constructor * * A reading is a container for the values related to a single asset. * Each actual datavalue that relates to that asset is held within an * instance of a Datapoint class. */ Reading::Reading(const string& asset, Datapoint *value) : m_asset(asset), m_has_id(false), m_id(0) { m_values.push_back(value); // Store seconds and microseconds gettimeofday(&m_timestamp, NULL); // Initialise m_userTimestamp m_userTimestamp = m_timestamp; } /** * Reading constructor * * A reading is a container for the values related to a single asset. * Each actual datavalue that relates to that asset is held within an * instance of a Datapoint class. */ Reading::Reading(const string& asset, vector<Datapoint *> values) : m_asset(asset), m_has_id(false), m_id(0) { for (auto it = values.cbegin(); it != values.cend(); it++) { m_values.push_back(*it); } // Store seconds and microseconds gettimeofday(&m_timestamp, NULL); // Initialise m_userTimestamp m_userTimestamp = m_timestamp; } /** * Reading constructor * * A reading is a container for the values related to a single asset. 
* Each actual datavalue that relates to that asset is held within an * instance of a Datapoint class. */ Reading::Reading(const string& asset, vector<Datapoint *> values, const string& ts) : m_asset(asset), m_has_id(false), m_id(0) { for (auto it = values.cbegin(); it != values.cend(); it++) { m_values.push_back(*it); } stringToTimestamp(ts, &m_timestamp); // Initialise m_userTimestamp m_userTimestamp = m_timestamp; } /** * Construct a reading with datapoints given as JSON */ Reading::Reading(const string& asset, const string& datapoints) : m_asset(asset), m_has_id(false), m_id(0) { Document d; if (d.Parse(datapoints.c_str()).HasParseError()) { throw runtime_error("Failed to parse reading datapoints " + datapoints); } for (Value::ConstMemberIterator itr = d.MemberBegin(); itr != d.MemberEnd(); ++itr) { string name = itr->name.GetString(); if (itr->value.IsInt64()) { long v = itr->value.GetInt64(); DatapointValue dpv(v); m_values.push_back(new Datapoint(name, dpv)); } else if (itr->value.IsDouble()) { double v = itr->value.GetDouble(); DatapointValue dpv(v); m_values.push_back(new Datapoint(name, dpv)); } else if (itr->value.IsString()) { string v = itr->value.GetString(); DatapointValue dpv(v); m_values.push_back(new Datapoint(name, dpv)); } else if (itr->value.IsObject()) { // Map objects as nested datapoints vector<Datapoint *> *values = JSONtoDatapoints(itr->value); DatapointValue dpv(values, true); m_values.push_back(new Datapoint(name, dpv)); } else if (itr->value.IsArray()) { vector<double> arr; for (auto& v : itr->value.GetArray()) { if (v.IsNumber()) arr.emplace_back(v.GetDouble()); else throw runtime_error("Only numeric lists are currently supported in datapoints"); } DatapointValue dpv(arr); m_values.emplace_back(new Datapoint(name, dpv)); } } // Store seconds and microseconds gettimeofday(&m_timestamp, NULL); // Initialise m_userTimestamp m_userTimestamp = m_timestamp; } /** * Reading copy constructor */ Reading::Reading(const Reading& orig) : 
m_asset(orig.m_asset), m_timestamp(orig.m_timestamp), m_userTimestamp(orig.m_userTimestamp), m_has_id(orig.m_has_id), m_id(orig.m_id) { for (auto it = orig.m_values.cbegin(); it != orig.m_values.cend(); it++) { m_values.emplace_back(new Datapoint(**it)); } } /** * Destructor for Reading class */ Reading::~Reading() { for (auto it = m_values.cbegin(); it != m_values.cend(); it++) { delete(*it); } } /** * Remove all data points for Reading class */ void Reading::removeAllDatapoints() { for (auto it = m_values.cbegin(); it != m_values.cend(); it++) { delete(*it); } m_values.clear(); } /** * Add another data point to an asset reading */ void Reading::addDatapoint(Datapoint *value) { m_values.push_back(value); } /** * Remove a datapoint from the reading * * @param name Name of the datapoint to remove * @return Pointer to the datapoint removed or NULL if it was not found */ Datapoint *Reading::removeDatapoint(const string& name) { Datapoint *rval; for (auto it = m_values.begin(); it != m_values.end(); it++) { if ((*it)->getName().compare(name) == 0) { rval = *it; m_values.erase(it); return rval; } } return NULL; } /** * Return a specific data point by name * * @param name The name of the datapoint to return * @return Pointer a the named datapoint or NULL if it does not exist */ Datapoint *Reading::getDatapoint(const string& name) const { Datapoint *rval; for (auto it = m_values.cbegin(); it != m_values.cend(); it++) { if ((*it)->getName().compare(name) == 0) { rval = *it; return rval; } } return NULL; } /** * Return the asset reading as a JSON structure encoded in a * C++ string. 
*/ string Reading::toJSON(bool minimal) const { ostringstream convert; convert << "{\"asset_code\":\""; convert << escape(m_asset); convert << "\",\"user_ts\":\""; // Add date_time with microseconds + timezone UTC: // YYYY-MM-DD HH24:MM:SS.MS+00:00 convert << getAssetDateUserTime(FMT_DEFAULT) << "+00:00"; if (!minimal) { convert << "\",\"ts\":\""; // Add date_time with microseconds + timezone UTC: // YYYY-MM-DD HH24:MM:SS.MS+00:00 convert << getAssetDateTime(FMT_DEFAULT) << "+00:00"; } // Add values convert << "\",\"reading\":{"; for (auto it = m_values.cbegin(); it != m_values.cend(); it++) { if (it != m_values.cbegin()) { convert << ","; } convert << (*it)->toJSONProperty(); } convert << "}}"; return convert.str(); } /** * Return the asset reading as a JSON structure encoded in a * C++ string. */ string Reading::getDatapointsJSON() const { ostringstream convert; convert << "{"; for (auto it = m_values.cbegin(); it != m_values.cend(); it++) { if (it != m_values.cbegin()) { convert << ","; } convert << (*it)->toJSONProperty(); } convert << "}"; return convert.str(); } /** * Convert time since epoch to a formatted m_timestamp DataTime in UTC * and use a cache to speed it up * @param tv_sec Seconds since epoch * @param date_time Buffer in which to return the formatted timestamp * @param dateFormat Format: FMT_DEFAULT or FMT_STANDARD */ void Reading::getFormattedDateTimeStr(const time_t *tv_sec, char *date_time, readingTimeFormat dateFormat) const { static unsigned long cached_sec_since_epoch = 0; static char cached_date_time_str[DATE_TIME_BUFFER_LEN] = ""; static readingTimeFormat cachedDateFormat = (readingTimeFormat) 0xff; static std::mutex mtx; std::unique_lock<std::mutex> lck(mtx); if(*cached_date_time_str && cached_sec_since_epoch && *tv_sec == cached_sec_since_epoch && cachedDateFormat == dateFormat) { strncpy(date_time, cached_date_time_str, DATE_TIME_BUFFER_LEN); date_time[DATE_TIME_BUFFER_LEN-1] = '\0'; return; } struct tm timeinfo; gmtime_r(tv_sec, 
&timeinfo); /** * Build date_time with format YYYY-MM-DD HH24:MM:SS.MS+00:00 * this is same as Python3: * datetime.datetime.now(tz=datetime.timezone.utc) */ // Create datetime with seconds std::strftime(date_time, DATE_TIME_BUFFER_LEN, m_dateTypes[dateFormat].c_str(), &timeinfo); // update cache strncpy(cached_date_time_str, date_time, DATE_TIME_BUFFER_LEN); cached_date_time_str[DATE_TIME_BUFFER_LEN-1] = '\0'; cached_sec_since_epoch = *tv_sec; cachedDateFormat = dateFormat; } /** * Return a formatted m_timestamp DataTime in UTC * @param dateFormat Format: FMT_DEFAULT or FMT_STANDARD * @return The formatted datetime string */ const string Reading::getAssetDateTime(readingTimeFormat dateFormat, bool addMS) const { char date_time[DATE_TIME_BUFFER_LEN]; char micro_s[10]; char assetTime[DATE_TIME_BUFFER_LEN + 20]; getFormattedDateTimeStr(&m_timestamp.tv_sec, date_time, dateFormat); if (dateFormat != FMT_ISO8601 && addMS) { // Add microseconds snprintf(micro_s, sizeof(micro_s), ".%06lu", m_timestamp.tv_usec); // Add date_time + microseconds if (dateFormat != FMT_ISO8601MS) { snprintf(assetTime, sizeof(assetTime), "%s%s", date_time, micro_s); } else { string dt(date_time); size_t pos = dt.find_first_of("+"); pos--; snprintf(assetTime, sizeof(assetTime), "%s%s%s", dt.substr(0, pos).c_str(), micro_s, dt.substr(pos).c_str()); } return string(assetTime); } else { return string(date_time); } } /** * Return a formatted m_userTimestamp DataTime in UTC * @param dateFormat Format: FMT_DEFAULT or FMT_STANDARD * @return The formatted datetime string */ const string Reading::getAssetDateUserTime(readingTimeFormat dateFormat, bool addMS) const { char date_time[DATE_TIME_BUFFER_LEN+10]; char micro_s[10]; char assetTime[DATE_TIME_BUFFER_LEN + 20]; getFormattedDateTimeStr(&m_userTimestamp.tv_sec, date_time, dateFormat); if (dateFormat != FMT_ISO8601 && addMS) { // Add microseconds snprintf(micro_s, sizeof(micro_s), ".%06lu", m_userTimestamp.tv_usec); // Add date_time + microseconds if 
(dateFormat != FMT_ISO8601MS) { snprintf(assetTime, sizeof(assetTime), "%s%s", date_time, micro_s); } else { string dt(date_time); size_t pos = dt.find_first_of("+"); pos--; snprintf(assetTime, sizeof(assetTime), "%s%s%s", dt.substr(0, pos).c_str(), micro_s, dt.substr(pos).c_str()); } return string(assetTime); } else { return string(date_time); } } /** * Set the system timestamp from a string of the format * 2019-01-01 10:00:00.123456+08:00 * The timeval is populated in UTC * * @param timestamp The timestamp string */ void Reading::setTimestamp(const string& timestamp) { stringToTimestamp(timestamp, &m_timestamp); } /** * Set the user timestamp from a string of the format * 2019-01-01 10:00:00.123456+08:00 * The timeval is populated in UTC * * @param timestamp The timestamp string */ void Reading::setUserTimestamp(const string& timestamp) { stringToTimestamp(timestamp, &m_userTimestamp); } /** * Convert a string timestamp, with milliseconds to a * struct timeval. * * Timezone handling * The timezone in the string is extracted to get UTC values. 
* Times within a reading are always stored as UTC * * @param timestamp String timestamp * @param ts Struct timeval to populate */ void Reading::stringToTimestamp(const string& timestamp, struct timeval *ts) { static std::mutex mtx; static char cached_timestamp_upto_min[32] = ""; static unsigned long cached_sec_since_epoch = 0; const int timestamp_str_len_till_min = 16; const int timestamp_str_len_till_sec = 19; char date_time [DATE_TIME_BUFFER_LEN]; strcpy (date_time, timestamp.c_str()); { lock_guard<mutex> guard(mtx); char timestamp_sec[32]; strncpy(timestamp_sec, date_time, timestamp_str_len_till_sec); timestamp_sec[timestamp_str_len_till_sec] = '\0'; if(*cached_timestamp_upto_min && cached_sec_since_epoch && (strncmp(timestamp_sec, cached_timestamp_upto_min, timestamp_str_len_till_min) == 0)) { // cache hit int sec_part = strtoul(timestamp_sec+timestamp_str_len_till_min+1, NULL, 10); ts->tv_sec = cached_sec_since_epoch + sec_part; } else { // cache miss struct tm tm; memset(&tm, 0, sizeof(struct tm)); strptime(date_time, "%Y-%m-%d %H:%M:%S", &tm); // Convert time to epoch - mktime assumes localtime so most adjust for that ts->tv_sec = mktime(&tm); extern long timezone; ts->tv_sec -= timezone; strncpy(cached_timestamp_upto_min, timestamp_sec, timestamp_str_len_till_min); cached_timestamp_upto_min[timestamp_str_len_till_min] = '\0'; cached_sec_since_epoch = ts->tv_sec - tm.tm_sec; // store only for upto-minute part } } // Now process the fractional seconds const char *ptr = date_time; while (*ptr && *ptr != '.') ptr++; if (*ptr) { char *eptr; ts->tv_usec = strtol(ptr + 1, &eptr, 10); int digits = eptr - (ptr + 1); // Number of digits we have while (digits < 6) { digits++; ts->tv_usec *= 10; } } else { ts->tv_usec = 0; } // Get the timezone from the string and convert to UTC ptr = date_time + 10; // Skip date as it contains '-' characters while (*ptr && *ptr != '-' && *ptr != '+') ptr++; if (*ptr) { int h, m; int sign = (*ptr == '+' ? 
-1 : +1); h = strtoul(ptr+1, NULL, 10); m = strtoul(ptr+4, NULL, 10); ts->tv_sec += sign * ((3600 * h) + (60 * m)); } } /** * Escape quotes etc to allow the string to be a property value within * a JSON document * * @param str The string to escape * @return The escaped string */ const string Reading::escape(const string& str) const { string rval; int bscount = 0; for (size_t i = 0; i < str.length(); i++) { if (str[i] == '\\') { if (i + 1 < str.length() && (str[i + 1] == '"' || str[i + 1] == '\\' || str[i + 1] == '/' || str[i-1] == '\\')) { rval += '\\'; } else { rval += "\\\\"; } bscount++; } else if (str[i] == '\"') { if ((bscount & 1) == 0) // not already escaped { rval += "\\"; // Add escape of " } rval += str[i]; bscount = 0; } else { rval += str[i]; bscount = 0; } } return rval; } /** * Convert a JSON Value object to a set of data points * * @param json The json object to convert */ vector<Datapoint *> *Reading::JSONtoDatapoints(const Value& json) { vector<Datapoint *> *values = new vector<Datapoint *>; for (Value::ConstMemberIterator itr = json.MemberBegin(); itr != json.MemberEnd(); ++itr) { string name = itr->name.GetString(); if (itr->value.IsInt64()) { long v = itr->value.GetInt64(); DatapointValue dpv(v); values->push_back(new Datapoint(name, dpv)); } else if (itr->value.IsDouble()) { double v = itr->value.GetDouble(); DatapointValue dpv(v); values->push_back(new Datapoint(name, dpv)); } else if (itr->value.IsString()) { string v = itr->value.GetString(); DatapointValue dpv(v); values->push_back(new Datapoint(name, dpv)); } else if (itr->value.IsObject()) { // Map objects as nested datapoints vector<Datapoint *> *nestedValues = JSONtoDatapoints(itr->value); DatapointValue dpv(nestedValues, true); values->push_back(new Datapoint(name, dpv)); } else if (itr->value.IsArray()) { vector<double> arr; for (auto& v : itr->value.GetArray()) { if (v.IsNumber()) arr.emplace_back(v.GetDouble()); else throw runtime_error("Only numeric lists are currently supported in 
datapoints"); } DatapointValue dpv(arr); values->emplace_back(new Datapoint(name, dpv)); } } return values; } /** * Create the information about the macros to substitute in the given string * * @param str The string we are substituting * @param macros Vector of macros to build up */ void Reading::collectMacroInfo(const string& str, vector<Macro>& macros) { string::size_type start = str.find('$'); string::size_type end = str.find('$', start + 1); while (start != string::npos && end != string::npos) { string::size_type bar = str.find('|', start + 1); if (bar != string::npos && bar < end && bar > start + 1) { string def = str.substr(bar + 1, end - bar - 1); macros.emplace_back(Macro(str.substr(start + 1, bar - start - 1), start, def)); } else if (end > start + 1) { macros.emplace_back(Macro(str.substr(start + 1, end - start - 1), start)); } start = str.find('$', end + 1); end = str.find('$', start + 1); } } /** * Substitute values from this reading into the string. * Macros are of the form $datapointname$, $ASSET$ or * $datapointname|default$ * * @param str The string to substitute into * @return string The substituted string */ string Reading::substitute(const string& str) { string rval = str; vector<Macro> macros; collectMacroInfo(rval, macros); // Replace Macros by datapoint value for (auto it = macros.rbegin(); it != macros.rend(); ++it) { // In case of ASSET Macro, replace it by asset name instead of datapoint value if (it->name == "ASSET") { rval.replace(it->start, it->name.length()+2, this->getAssetName() ); continue; } Datapoint * datapoint = this->getDatapoint(it->name); if (datapoint) { // Check for datapoint type for string and numbers DatapointValue::dataTagType dataType = datapoint->getData().getType(); if ( dataType != DatapointValue::dataTagType::T_STRING && dataType != DatapointValue::dataTagType::T_INTEGER && dataType != DatapointValue::dataTagType::T_FLOAT ) { Logger::getLogger()->warn("The datapoint %s cannot be used as a macro substitution as it is 
not a string or numeric value",it->name.c_str()); continue; } string datapointValue = ""; switch (dataType) { case DatapointValue::dataTagType::T_INTEGER: datapointValue = std::to_string(datapoint->getData().toInt()); break; case DatapointValue::dataTagType::T_FLOAT: datapointValue = std::to_string(datapoint->getData().toDouble()); break; default: datapointValue = datapoint->getData().toStringValue(); break; } rval.replace(it->start, it->name.length()+2 + (it->def.empty() ? 0 : it->def.length() + 1), datapointValue ); } else if (!it->def.empty()) { rval.replace(it->start, it->name.length() + it->def.length() + 3, it->def); } else { rval.replace(it->start, it->name.length()+2, ""); } } return rval; } ================================================ FILE: C/common/reading_circularbuffer.cpp ================================================ /* * Fledge reading circular buffer class * * Copyright (c) 2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <reading_circularbuffer.h> using namespace std; /** * Create a circular buffer of readings * * @param size The number of items to retain in the circular buffer */ ReadingCircularBuffer::ReadingCircularBuffer(unsigned int size) : m_size(size), m_insert(0), m_entries(0) { m_readings.resize(size, NULL); } /** * Destructor for the circular buffer */ ReadingCircularBuffer::~ReadingCircularBuffer() { lock_guard<mutex> guard(m_mutex); for (int i = 0; i < m_entries; i++) m_readings[i] = NULL; } /** * Insert a single reading into the shared buffer * * @param reading The reading to insert */ void ReadingCircularBuffer::insert(Reading *reading) { lock_guard<mutex> guard(m_mutex); if (m_entries == m_size) m_readings[m_insert] = NULL; else m_entries++; shared_ptr<Reading> copy(new Reading(*reading)); m_readings[m_insert] = copy; m_insert++; if (m_insert >= m_size) m_insert = 0; } /** * Insert a list of readings into the circular buffer * * @param readings The set of readings to ingest */ 
void ReadingCircularBuffer::insert(const vector<Reading *>& readings) { for (auto& reading : readings) insert(reading); } /** * Insert a list of readings into the circular buffer * * @param readings The set of readings to ingest */ void ReadingCircularBuffer::insert(const vector<Reading *> *readings) { for (auto& reading : *readings) insert(reading); } /** * Return the buffered data into a supplied vector * * @param vec The vector to populate with the shared pointers * @return int The number of readings placed in the vector */ int ReadingCircularBuffer::extract(vector<shared_ptr<Reading>>& vec) { int start = 0; lock_guard<mutex> guard(m_mutex); if (m_entries == m_size) { start = (m_insert + 1) % m_size; } for (int i = 0; i < m_entries; i++) { vec.push_back(m_readings[start % m_size]); start++; } return m_entries; } ================================================ FILE: C/common/reading_set.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <reading_set.h> #include <string> #include <rapidjson/document.h> #include <sstream> #include <iostream> #include <time.h> #include <stdlib.h> #include <logger.h> #include <base64databuffer.h> #include <base64dpimage.h> #include <boost/algorithm/string/replace.hpp> #define ASSET_NAME_INVALID_READING "error_invalid_reading" static const char* kTypeNames[] = { "Null", "False", "True", "Object", "Array", "String", "Number" }; using namespace std; using namespace rapidjson; // List of characters to be escaped in JSON const vector<string> JSON_characters_to_be_escaped = { "\\", "\"" }; /** * Construct an empty reading set */ ReadingSet::ReadingSet() : m_count(0), m_last_id(0) { } /** * Construct a reading set from a vector<Reading *> pointer * NOTE: readings are copied into m_readings * * @param readings The vector<Reading *> pointer * of readings to be copied * 
into m_readings vector */ ReadingSet::ReadingSet(const vector<Reading *>* readings) : m_last_id(0) { m_count = readings->size(); for (auto it = readings->begin(); it != readings->end(); ++it) { if ((*it)->hasId() && (*it)->getId() > m_last_id) m_last_id = (*it)->getId(); m_readings.push_back(*it); } } /** * Construct a reading set from a JSON document returned from * the Fledge storage service query or notification. The JSON * is parsed using the in-situ RapidJSON parser in order to * reduce overhead on what is most likely a large JSON document. * * WARNING: Although the string passed in is defiend as const * this call is destructive to this string and the conntents * of the string should not be used after making this call. * * @param json The JSON document (as string) with readings data */ ReadingSet::ReadingSet(const std::string& json) : m_last_id(0) { unsigned long rows = 0; Document doc; doc.ParseInsitu((char *)json.c_str()); // Cast away const in order to use in-situ if (doc.HasParseError()) { throw new ReadingSetException("Unable to parse results json document"); } // Check we have "count" and "rows" bool docHasRows = doc.HasMember("rows"); // Query bool docHasReadings = doc.HasMember("readings"); // Notification // Check we have "rows" or "readings" if (!docHasRows && !docHasReadings) { throw new ReadingSetException("Missing readings or rows array"); } // Check we have "count" and "rows" if (doc.HasMember("count") && docHasRows) { m_count = doc["count"].GetUint(); // No readings if (!m_count) { m_last_id = 0; return; } } else { // These fields might be updated later m_count = 0; m_last_id = 0; } // Get "rows" or "readings" data const Value& readings = docHasRows ? 
doc["rows"] : doc["readings"]; if (readings.IsArray()) { unsigned long id = 0; // Process every rows and create the result set for (auto& reading : readings.GetArray()) { if (!reading.IsObject()) { throw new ReadingSetException("Expected reading to be an object"); } JSONReading *value = new JSONReading(reading); m_readings.push_back(value); // Get the Reading Id id = value->getId(); // We don't have count informations with "readings" if (docHasReadings) { rows++; } } // Set the last id m_last_id = id; // Set count informations with "readings" if (docHasReadings) { m_count = rows; } } else { throw new ReadingSetException("Expected array of rows in result set"); } } /** * Destructor for a result set */ ReadingSet::~ReadingSet() { /* Delete the readings */ for (auto it = m_readings.cbegin(); it != m_readings.cend(); it++) { delete *it; } } /** * Append the readings in a second reading set to this reading set. * The readings are removed from the original reading set * * @param readings A ReadingSet to append to the current ReadingSet */ void ReadingSet::append(ReadingSet *readings) { vector<Reading *> *vec = readings->getAllReadingsPtr(); append(*vec); readings->clear(); } /** * Append the readings in a second reading set to this reading set. * The readings are removed from the original reading set * * @param readings A ReadingSet to append to the current ReadingSet */ void ReadingSet::append(ReadingSet& readings) { vector<Reading *> *vec = readings.getAllReadingsPtr(); append(*vec); readings.clear(); } /** * Append a set of readings to this reading set. The * readings are not copied, but rather moved from the * vector, with the resulting vector havign the values * removed on return. * * It is assumed the readings in the vector have been * created with the new operator. 
* * @param readings A vector of Reading pointers to append to the ReadingSet */ void ReadingSet::append(vector<Reading *>& readings) { for (auto it = readings.cbegin(); it != readings.cend(); it++) { if ((*it)->getId() > m_last_id) m_last_id = (*it)->getId(); m_readings.push_back(*it); m_count++; } readings.clear(); } /** * merge the readings in a vector with the set of readings in the reading set. * The input reading vector must be ordered as per timestamp and cleared at the end of the operation. * @param readings A vector of Reading pointers to merge with the ReadingSet */ void ReadingSet::merge(std::vector<Reading *> *readings) { if (!readings || readings->empty()) { return; } size_t totalSize = m_readings.size() + readings->size(); std::vector<Reading*> merged; merged.reserve(totalSize); merged.resize(totalSize); // make sure we can assign via operator[] size_t i = 0; auto p1 = m_readings.begin(); auto p2 = readings->begin(); while (p1 != m_readings.end() || p2 != readings->end()) { if (p1 != m_readings.end() && p2 != readings->end()) { struct timeval ta, tb; (*p1)->getUserTimestamp(&ta); (*p2)->getUserTimestamp(&tb); // stable ordering: if equal, p1 wins if (timercmp(&ta, &tb, <=)) { merged[i++] = *p1++; } else { if ((*p2)->hasId() && (*p2)->getId() > m_last_id) { m_last_id = (*p2)->getId(); } merged[i++] = *p2++; } } else if (p1 != m_readings.end()) { merged[i++] = *p1++; } else if (p2 != readings->end()) { if ((*p2)->hasId() && (*p2)->getId() > m_last_id) { m_last_id = (*p2)->getId(); } merged[i++] = *p2++; } } m_readings = std::move(merged); m_count = m_readings.size(); //Clear input readings vector readings->clear(); } /** * Deep copy a set of readings to this reading set. 
* * @param src The reading set to copy * @return bool True if the reading set was copied */ bool ReadingSet::copy(const ReadingSet& src) { vector<Reading *> readings; bool copyResult = true; try { // Iterate over all the readings in ReadingSet for (auto const &reading : src.getAllReadings()) { string assetName = reading->getAssetName(); string ts = reading->getAssetDateUserTime(); vector<Datapoint *> dataPoints; try { // Iterate over all the datapoints associated with one reading for (auto const &dp : reading->getReadingData()) { string dataPointName = dp->getName(); DatapointValue dv = dp->getData(); dataPoints.emplace_back(new Datapoint(dataPointName, dv)); } } // Catch exception while copying datapoints catch(std::bad_alloc& ex) { Logger::getLogger()->error("Insufficient memory, failed while copying dataPoints from ReadingSet, %s ", ex.what()); copyResult = false; for (auto const &dp : dataPoints) { delete dp; } dataPoints.clear(); throw; } catch (std::exception& ex) { Logger::getLogger()->error("Unknown exception, failed while copying datapoint from ReadingSet, %s ", ex.what()); copyResult = false; for (auto const &dp : dataPoints) { delete dp; } dataPoints.clear(); throw; } Reading *in = new Reading(assetName, dataPoints, ts); readings.emplace_back(in); } } // Catch exception while copying readings catch (std::bad_alloc& ex) { Logger::getLogger()->error("Insufficient memory, failed while copying %d reading from ReadingSet, %s ",readings.size()+1, ex.what()); copyResult = false; for (auto const &r : readings) { delete r; } readings.clear(); } catch (std::exception& ex) { Logger::getLogger()->error("Unknown exception, failed while copying %d reading from ReadingSet, %s ",readings.size()+1, ex.what()); copyResult = false; for (auto const &r : readings) { delete r; } readings.clear(); } //Append if All elements have been copied successfully if (copyResult) { append(readings); } return copyResult; } /** * Remove all readings from the reading set and delete the 
memory * After this call the reading set exists but contains no readings. */ void ReadingSet::removeAll() { for (auto it = m_readings.cbegin(); it != m_readings.cend(); it++) { delete *it; } m_readings.clear(); m_count = 0; m_last_id = 0; } /** * Remove the readings from the vector without deleting them */ void ReadingSet::clear() { m_readings.clear(); m_count = 0; m_last_id = 0; } /** * Remove readings from the vector and return a reference to new vector * containing readings* */ std::vector<Reading*>* ReadingSet::moveAllReadings() { std::vector<Reading*>* transferredPtr = new std::vector<Reading*>(std::move(m_readings)); m_count = 0; m_last_id = 0; m_readings.clear(); return transferredPtr; } /** * Remove reading from vector based on index and return its pointer */ Reading* ReadingSet::removeReading(unsigned long id) { if (id >= m_readings.size()) { return nullptr; } Reading* reading = m_readings[id]; m_readings.erase(m_readings.begin() + id); m_count--; return reading; } /** * Return the ID of the nth reading in the reading set * * @param pos The position of the reading to return the ID for */ unsigned long ReadingSet::getReadingId(uint32_t pos) { if (pos < m_readings.size()) { Reading *reading = m_readings[pos]; return reading->getId(); } return m_last_id; } /** * Construct a reading from a JSON document * * The data can be in the "value" property as single numeric value * or in the JSON "reading" with different values and types * * @param json The JSON document that contains the reading */ JSONReading::JSONReading(const Value& json) { if (json.HasMember("id")) { m_id = json["id"].GetUint64(); m_has_id = true; } else { m_has_id = false; } if (json.HasMember("asset_code")) { m_asset = json["asset_code"].GetString(); } else { string errMsg = "Malformed JSON reading, missing asset_code '"; errMsg.append("value"); errMsg += "'"; throw new ReadingSetException(errMsg.c_str()); } if (json.HasMember("user_ts")) { stringToTimestamp(json["user_ts"].GetString(), 
&m_userTimestamp); } else { string errMsg = "Malformed JSON reading, missing user timestamp '"; errMsg.append("value"); errMsg += "'"; throw new ReadingSetException(errMsg.c_str()); } if (json.HasMember("ts")) { stringToTimestamp(json["ts"].GetString(), &m_timestamp); } else { m_timestamp = m_userTimestamp; } // We have a single value here which is a number if (json.HasMember("value") && json["value"].IsNumber()) { const Value &m = json["value"]; if (m.IsInt() || m.IsUint() || m.IsInt64() || m.IsUint64()) { DatapointValue* value; if (m.IsInt() || m.IsUint() ) { value = new DatapointValue((long) m.GetInt()); } else { value = new DatapointValue((long) m.GetInt64()); } this->addDatapoint(new Datapoint("value",*value)); delete value; } else if (m.IsDouble()) { DatapointValue value(m.GetDouble()); this->addDatapoint(new Datapoint("value", value)); } else { string errMsg = "Cannot parse the numeric type"; errMsg += " of reading element '"; errMsg.append("value"); errMsg += "'"; throw new ReadingSetException(errMsg.c_str()); } } else if (json.HasMember("reading")) { if (json["reading"].IsObject()) { // Add 'reading' values for (auto &m : json["reading"].GetObject()) { Datapoint *dp = datapoint(m.name.GetString(), m.value); if (dp) { addDatapoint(dp); } } } else { // The reading should be an object at this stage, it is and invalid one if not // the asset name ASSET_NAME_INVALID_READING will be created in the PI-Server containing the // invalid asset_name/values. 
if (json["reading"].IsString()) { string tmp_reading1 = json["reading"].GetString(); // Escape specific character for to be properly manage as JSON for (const string &item : JSON_characters_to_be_escaped) { escapeCharacter(tmp_reading1, item); } Logger::getLogger()->error( "Invalid reading: Asset name |%s| reading value |%s| converted value |%s|", m_asset.c_str(), json["reading"].GetString(), tmp_reading1.c_str()); DatapointValue value(tmp_reading1); this->addDatapoint(new Datapoint(m_asset, value)); } else if (json["reading"].IsInt() || json["reading"].IsUint() || json["reading"].IsInt64() || json["reading"].IsUint64()) { DatapointValue *value; if (json["reading"].IsInt() || json["reading"].IsUint()) { value = new DatapointValue((long) json["reading"].GetInt()); } else { value = new DatapointValue((long) json["reading"].GetInt64()); } this->addDatapoint(new Datapoint(m_asset, *value)); delete value; } else if (json["reading"].IsDouble()) { DatapointValue value(json["reading"].GetDouble()); this->addDatapoint(new Datapoint(m_asset, value)); } m_asset = string(ASSET_NAME_INVALID_READING) + string("_") + m_asset.c_str(); } } else { Logger::getLogger()->error("Missing reading property for JSON reading, %s", m_asset.c_str()); } } /** * Create a Datapoint from a JSON item in a reading * * @param item The JSON object forthe data point * @return Datapoint* The new data point */ Datapoint *JSONReading::datapoint(const string& name, const Value& item) { Datapoint *rval = NULL; switch (item.GetType()) { // String case (kStringType): { string str = item.GetString(); if (str[0] == '_' && str[1] == '_') { // special encoded type size_t pos = str.find_first_of(':'); if (str.compare(2, 10, "DATABUFFER") == 0) { try { DataBuffer *databuffer = new Base64DataBuffer(str.substr(pos + 1)); DatapointValue value(databuffer); rval = new Datapoint(name, value); } catch (exception& e) { Logger::getLogger()->error("Unable to create datapoint %s as the base 64 encoded data is incorrect, %s", 
name.c_str(), e.what()); } } else if (str.compare(2, 7, "DPIMAGE") == 0) { try { DPImage *image = new Base64DPImage(str.substr(pos + 1)); DatapointValue value(image); rval = new Datapoint(name, value); } catch (exception& e) { Logger::getLogger()->error("Unable to create datapoint %s as the base 64 encoded data is incorrect, %s", name.c_str(), e.what()); } } } else { DatapointValue value(item.GetString()); rval = new Datapoint(name, value); } break; } // Number case (kNumberType): { if (item.IsInt()) { DatapointValue value((long)item.GetInt()); rval = new Datapoint(name, value); break; } else if (item.IsUint()) { DatapointValue value((long)item.GetUint()); rval = new Datapoint(name, value); break; } else if (item.IsInt64()) { DatapointValue value((long)item.GetInt64()); rval = new Datapoint(name, value); break; } else if (item.IsUint64()) { DatapointValue value((long)item.GetUint64()); rval = new Datapoint(name, value); break; } else if (item.IsDouble()) { DatapointValue value(item.GetDouble()); rval = new Datapoint(name, value); break; } else { string errMsg = "Cannot parse the numeric type"; errMsg += " of reading element '"; errMsg.append(name); errMsg += "'"; throw new ReadingSetException(errMsg.c_str()); break; } } // Arrays case kArrayType: { vector<double> arrayValues; for (auto& v : item.GetArray()) { if (v.IsDouble()) { arrayValues.push_back(v.GetDouble()); } else if (v.IsInt() || v.IsUint()) { double i = (double)v.GetInt(); arrayValues.push_back(i); } else if (v.IsInt64() || v.IsUint64()) { double i = (double)v.GetInt64(); arrayValues.push_back(i); } } // Don't create blank array of datapoint values if (!arrayValues.empty()) { DatapointValue value(arrayValues); rval = new Datapoint(name, value); } break; } // Nested object case kObjectType: { vector<Datapoint *> *obj = new vector<Datapoint *>; for (auto &mo : item.GetObject()) { Datapoint *dp = datapoint(mo.name.GetString(), mo.value); if (dp) { obj->push_back(dp); } } DatapointValue value(obj, true); 
rval = new Datapoint(name, value); break; } case kTrueType: { DatapointValue value("true"); rval = new Datapoint(name, value); break; } case kFalseType: { DatapointValue value("false"); rval = new Datapoint(name, value); break; } default: { char errMsg[80]; snprintf(errMsg, sizeof(errMsg), "Unhandled type for %s in JSON payload %d", name.c_str(), item.GetType()); throw new ReadingSetException(errMsg); } } return rval; } /** * Escapes a character in a string to be properly handled as JSON * */ void JSONReading::escapeCharacter(string& stringToEvaluate, string pattern) { string escaped = "\\" + pattern; boost::replace_all(stringToEvaluate, pattern, escaped); } ================================================ FILE: C/common/readingset_circularbuffer.cpp ================================================ /* * Fledge ReadingSet Circular Buffer. * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Devki Nandan Ghildiyal */ #include <readingset_circularbuffer.h> #include <logger.h> using namespace std; using namespace rapidjson; /** * Construct an empty reading set circular buffer * * @param maxBufferSize Maximum size of the ReadingSet circular buffer. It should be atleast one. 
*/ ReadingSetCircularBuffer::ReadingSetCircularBuffer(unsigned long maxBufferSize) { if ( maxBufferSize <= 0) { maxBufferSize = 1; Logger::getLogger()->warn("Minimum size of ReadingSetCircularBuffer cannot be less than one, setting buffer size to 1"); } m_maxBufferSize = maxBufferSize; m_nextReadIndex = 0; } /** * Destructor for a result set */ ReadingSetCircularBuffer::~ReadingSetCircularBuffer() { lock_guard<mutex> guard(m_mutex); /* Delete the readings */ m_circularBuffer.clear(); } /** * Insert a ReadingSet into circular buffer * * @param readings Reference for ReadingSet to be inserted into circular buffer */ void ReadingSetCircularBuffer::insert(ReadingSet& readings) { appendReadingSet(readings.getAllReadings()); } /** * Insert a ReadingSet into circular buffer * * @param readings Pointer for ReadingSet to be inserted into circular buffer */ void ReadingSetCircularBuffer::insert(ReadingSet* readings) { appendReadingSet(readings->getAllReadings()); } /** * Internal implementation for inserting ReadingSet into the circular buffer * * @param readings appends ReadingSet into the circular buffer */ void ReadingSetCircularBuffer::appendReadingSet(const std::vector<Reading *>& readings) { lock_guard<mutex> guard(m_mutex); bool isBufferFull = (m_circularBuffer.size() == m_maxBufferSize); //Check if there is space available to insert a new ReadingSet if (isBufferFull) { Logger::getLogger()->info("ReadingSetCircularBuffer buffer is full, removing first element"); // Make space for new ReadingSet and adjust m_nextReadIndex m_circularBuffer.erase(m_circularBuffer.begin() + 0); m_nextReadIndex--; } std::vector<Reading *> *newReadings = new std::vector<Reading *>; // Iterate over all the readings in ReadingSet for (auto const &reading : readings) { newReadings->emplace_back(new Reading(*reading)); } // Insert ReadingSet into buffer m_circularBuffer.push_back(std::make_shared<ReadingSet>(newReadings)); delete newReadings; } /** * Fetch the vector of ReadingSet from circular 
buffer * * @param isExtractSingleElement True to extract single ReadingSet otherwise for extract entire buffer * @return Return a vector of shared pointer to ReadingSet */ std::vector<std::shared_ptr<ReadingSet>> ReadingSetCircularBuffer::extract(bool isExtractSingleElement) { lock_guard<mutex> guard(m_mutex); bool isNoDataToRead = m_circularBuffer.empty() || (m_nextReadIndex == m_circularBuffer.size()); std::vector<std::shared_ptr<ReadingSet>> bufferedItem; // Check for empty buffer if (isNoDataToRead) { Logger::getLogger()->info("There is no more data to read in ReadingSet circualr buffer"); return bufferedItem; } // Return single item from buffer if (isExtractSingleElement) { bufferedItem.emplace_back(m_circularBuffer[m_nextReadIndex]); m_nextReadIndex++; return bufferedItem; } // Return Entire buffer if(m_nextReadIndex == 0) { m_nextReadIndex = m_circularBuffer.size(); return m_circularBuffer; } // Send remaining items in the buffer for (int i = m_nextReadIndex; i < m_circularBuffer.size(); i ++) bufferedItem.emplace_back(m_circularBuffer[i]); m_nextReadIndex = m_circularBuffer.size(); return bufferedItem; } ================================================ FILE: C/common/result_set.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <resultset.h> #include <string> #include <rapidjson/document.h> #include <sstream> #include <iostream> using namespace std; using namespace rapidjson; /** * Construct a result set from a JSON document returned from * the Fledge storage service. 
* * @param json The JSON document to construct the result set from */ ResultSet::ResultSet(const std::string& json) { Document doc; doc.Parse(json.c_str()); if (doc.HasParseError()) { throw new ResultException("Unable to parse results json document"); } if (doc.HasMember("count") && doc["count"].IsUint()) { m_rowCount = doc["count"].GetUint(); if (m_rowCount) { const Value& rows = doc["rows"]; if (!doc.HasMember("rows")) { throw new ResultException("Missing rows array"); } if (rows.IsArray()) { // Process first row to get column names and types const Value& firstRow = rows[0]; for (Value::ConstMemberIterator itr = firstRow.MemberBegin(); itr != firstRow.MemberEnd(); ++itr) { ColumnType type = STRING_COLUMN; if (itr->value.IsObject()) { type = JSON_COLUMN; } else if (itr->value.IsNumber() && itr->value.IsDouble()) { type = NUMBER_COLUMN; } else if (itr->value.IsNumber()) { type = INT_COLUMN; } else if (itr->value.IsBool()) { type = BOOL_COLUMN; } else if (itr->value.IsString()) { type = STRING_COLUMN; } // Array of any objects is JSON else if (itr->value.IsArray()) { type = JSON_COLUMN; } else { throw new ResultException("Unable to determine column type"); } m_columns.push_back(new Column(string(itr->name.GetString()), type)); } // Process every rows and create the result set for (auto& row : rows.GetArray()) { if (!row.IsObject()) { throw new ResultException("Expected row to be an object"); } ResultSet::Row *rowValue = new ResultSet::Row(this); unsigned int colNo = 0; for (Value::ConstMemberIterator item = row.MemberBegin(); item != row.MemberEnd(); ++item) { switch (m_columns[colNo]->getType()) { case STRING_COLUMN: if (item->value.IsBool()) { rowValue->append(new ColumnValue(item->value.IsTrue() ? 
"true" : "false")); } else { rowValue->append(new ColumnValue(string(item->value.GetString()))); } break; case INT_COLUMN: rowValue->append(new ColumnValue((long)(item->value.GetInt64()))); break; case NUMBER_COLUMN: rowValue->append(new ColumnValue(item->value.GetDouble())); break; case JSON_COLUMN: rowValue->append(new ColumnValue(item->value)); break; case BOOL_COLUMN: if (item->value.IsString()) rowValue->append(new ColumnValue(string(item->value.GetString()))); else rowValue->append(new ColumnValue(item->value.IsTrue() ? "true" : "false")); break; } colNo++; } m_rows.push_back(rowValue); } } else { throw new ResultException("Expected array of rows in result set"); } } } else { m_rowCount = 0; } } /** * Destructor for a result set */ ResultSet::~ResultSet() { /* Delete the columns */ for (auto it = m_columns.cbegin(); it != m_columns.cend(); it++) { delete *it; } /* Delete the rows */ for (auto it = m_rows.cbegin(); it != m_rows.cend(); it++) { delete *it; } } /** * Return the name of a specific column * * @param column - the column number of the column to return. * Columns are numbered from 0 * @return string& The name of the column * @throw ResultNoSuchColumnException The specified column does not exist in the result set */ const string& ResultSet::columnName(unsigned int column) const { if (column >= m_columns.size()) { throw new ResultNoSuchColumnException(); } return m_columns[column]->getName(); } /** * Return the type of a specific column * * @param column - the column number of the column to return. * Columns are numbered from 0 * @return ColumnType The type of the specified column * @throw ResultNoSuchColumnException The specified column does not exist in the result set */ ColumnType ResultSet::columnType(unsigned int column) const { if (column >= m_columns.size()) { throw new ResultNoSuchColumnException(); } return m_columns[column]->getType(); } /** * Return the type of a specific column * * @param name - the name of the column to return. 
* @return ColumnType The type of the specified column * @throw ResultNoSuchColumnException The specified column does not exist in the result set */ ColumnType ResultSet::columnType(const string& name) const { unsigned int column = findColumn(name); return m_columns[column]->getType(); } /** * Fetch an iterator for the rows in a result set. * The iterator is positioned at the first row in the * result set. */ ResultSet::RowIterator ResultSet::firstRow() { return m_rows.begin(); } /** * Given an iterator over the rows in a result set move to the * next row in the result set. * * @param it Iterator returned by the firstRow() method * @return RowIterator New value of the iterator * @throw ResultNoMoreRowsException There are no more rows in the result set */ ResultSet::RowIterator ResultSet::nextRow(RowIterator it) { if (it == m_rows.end()) throw new ResultNoMoreRowsException(); else return ++it; } /** * Given an iterator over the rows in a result set return if there * are any more rows in the result set. * * @param it Iterator returned by the firstRow() method * @return bool True if there are more rows in the result set */ bool ResultSet::hasNextRow(RowIterator it) const { return (it + 1) != m_rows.end(); } /** * Given an iterator over the rows in a result set return if there * this is the last row in the result set. * * @param it Iterator returned by the firstRow() method * @return bool True if there are no more rows in the result set */ bool ResultSet::isLastRow(RowIterator it) const { return (it + 1) == m_rows.end(); } /** * Return the type of the given column in this row. 
* * @param column The column number in the row, columns are numbered from 0 * @return ColumnType The column type of the specified column * @throw ResultNoSuchColumnException The specified column does not exist in the row */ ColumnType ResultSet::Row::getType(unsigned int column) { if (column > m_values.size()) throw new ResultNoSuchColumnException(); return m_values[column]->getType(); } /** * Return the type of the given column in this row. * * @param name The column name in the row * @return ColumnType The column type of the specified column * @throw ResultNoSuchColumnException The specified column does not exist in the row */ ColumnType ResultSet::Row::getType(const string& name) { unsigned int column = m_resultSet->findColumn(name); return m_values[column]->getType(); } /** * Return the column value of the given column in this row. * * @param column The column number in the row, columns are numbered from 0 * @return ColumnValue The column value of the specified column * @throw ResultNoSuchColumnException The specified column does not exist in the row */ ResultSet::ColumnValue *ResultSet::Row::getColumn(unsigned int column) const { if (column > m_values.size()) throw new ResultNoSuchColumnException(); return m_values[column]; } /** * Return the column value of the given column in this row. * * @param name The column name in the row * @return ColumnValue The column value of the specified column * @throw ResultNoSuchColumnException The specified column does not exist in the row */ ResultSet::ColumnValue *ResultSet::Row::getColumn(const string& name) const { unsigned int column = m_resultSet->findColumn(name); return m_values[column]; } /** * Find the named column in the result set and return the column index. 
* * @param name The name of the column to return * @return unsigned int The index of the named column * @throw ResultNoSuchColumnException The named column does not exist in the result set */ unsigned int ResultSet::findColumn(const string& name) const { for (unsigned int i = 0; i != m_columns.size(); i++) { if (m_columns[i]->getName().compare(name) == 0) { return i; } } throw ResultNoSuchColumnException(); } /** * Retrieve a column value as an integer * * @return long Integer value * @throw ResultIncorrectTypeException The column can not be returned as an integer */ long ResultSet::ColumnValue::getInteger() const { switch (m_type) { case INT_COLUMN: return m_value.ival; case NUMBER_COLUMN: return (long)m_value.fval; default: throw new ResultIncorrectTypeException(); } } /** * Retrieve a column value as a floating point number * * @return double Floating point value * @throw ResultIncorrectTypeException The column can not be returned as a double */ double ResultSet::ColumnValue::getNumber() const { switch (m_type) { case INT_COLUMN: return (double)m_value.ival; case NUMBER_COLUMN: return m_value.fval; default: throw new ResultIncorrectTypeException(); } } /** * Retrieve a column value as a string * * @return double Floating point value * @throw ResultIncorrectTypeException The column can not be returned as a double */ char *ResultSet::ColumnValue::getString() const { switch (m_type) { case STRING_COLUMN: return m_value.str; default: throw new ResultIncorrectTypeException(); } } ================================================ FILE: C/common/service_record.cpp ================================================ /* * Fledge storage service. 
* * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <service_record.h> #include <string> #include <sstream> using namespace std; /** * Constructor for the service record */ ServiceRecord::ServiceRecord(const string& name, const string& type, const string& protocol, const string& address, const unsigned short port, const unsigned short managementPort, const string& token) : m_name(name), m_type(type), m_protocol(protocol), m_address(address), m_port(port), m_managementPort(managementPort), m_token(token) { } /** * Construct an incomplete service record with a name and type */ ServiceRecord::ServiceRecord(const string& name, const string& type) : m_name(name), m_type(type), m_protocol(""), m_address(""), m_port(0), m_managementPort(0) { } /** * Construct an incomplete service record with just a name */ ServiceRecord::ServiceRecord(const string& name) : m_name(name), m_type(""), m_protocol(""), m_address(""), m_port(0), m_managementPort(0) { } /** * Serialise the service record to json */ void ServiceRecord::asJSON(string& json) const { ostringstream convert; convert << "{ "; convert << "\"name\" : \"" << m_name << "\","; convert << "\"type\" : \"" << m_type << "\","; convert << "\"protocol\" : \"" << m_protocol << "\","; convert << "\"address\" : \"" << m_address << "\","; convert << "\"management_port\" : " << m_managementPort; if (m_port) { convert << ",\"service_port\" : " << m_port; } if (m_token != "") { convert << ",\"token\" : \"" << m_token << "\""; } convert << " }"; json = convert.str(); } ================================================ FILE: C/common/storage_client.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <storage_client.h> #include <reading.h> #include <reading_set.h> #include <reading_stream.h> #include 
<rapidjson/document.h> #include <rapidjson/error/en.h> #include <management_client.h> #include <service_record.h> #include <string> #include <sstream> #include <iostream> #include <thread> #include <map> #include <string_utils.h> #include <sys/uio.h> #include <errno.h> #include <stdarg.h> #define EXCEPTION_BUFFER_SIZE 120 #define INSTRUMENT 0 // Streaming is currently disabled due to an issue that causes the stream to // hang after a period. Set the followign to 1 in order to enable streaming #define ENABLE_STREAMING 0 #if INSTRUMENT #include <sys/time.h> #endif using namespace std; using namespace rapidjson; using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>; // handles m_client_map access std::mutex sto_mtx_client_map; /** * Storage Client constructor */ StorageClient::StorageClient(const string& hostname, const unsigned short port) : m_streaming(false), m_management(NULL) { m_host = hostname; m_pid = getpid(); m_logger = Logger::getLogger(); m_urlbase << hostname << ":" << port; } /** * Storage Client constructor * stores the provided HttpClient into the map */ StorageClient::StorageClient(HttpClient *client) : m_streaming(false), m_management(NULL) { std::thread::id thread_id = std::this_thread::get_id(); sto_mtx_client_map.lock(); m_client_map[thread_id] = client; sto_mtx_client_map.unlock(); } /** * Destructor for storage client */ StorageClient::~StorageClient() { std::map<std::thread::id, HttpClient *>::iterator item; // Deletes all the HttpClient objects created in the map for (item = m_client_map.begin() ; item != m_client_map.end() ; ++item) { delete item->second; } } /** * Delete HttpClient object for current thread */ bool StorageClient::deleteHttpClient() { std::thread::id thread_id = std::this_thread::get_id(); lock_guard<mutex> guard(sto_mtx_client_map); if(m_client_map.find(thread_id) == m_client_map.end()) return false; ostringstream ss; ss << thread_id; Logger::getLogger()->debug("Storage client deleting HttpClient object @ %p for thread %s", 
m_client_map[thread_id], ss.str().c_str()); delete m_client_map[thread_id]; m_client_map.erase(thread_id); return true; } /** * Creates a HttpClient object for each thread * it stores/retrieves the reference to the HttpClient and the associated thread id in a map */ HttpClient *StorageClient::getHttpClient(void) { std::map<std::thread::id, HttpClient *>::iterator item; HttpClient *client; std::thread::id thread_id = std::this_thread::get_id(); sto_mtx_client_map.lock(); item = m_client_map.find(thread_id); if (item == m_client_map.end() ) { // Adding a new HttpClient client = new HttpClient(m_urlbase.str()); m_client_map[thread_id] = client; m_seqnum_map[thread_id].store(0); std::ostringstream ss; ss << std::this_thread::get_id(); } else { client = item->second; } sto_mtx_client_map.unlock(); return (client); } /** * Append a single reading */ bool StorageClient::readingAppend(Reading& reading) { try { ostringstream convert; convert << "{ \"readings\" : [ "; convert << reading.toJSON(); convert << " ] }"; auto res = this->getHttpClient()->request("POST", "/storage/reading", convert.str()); if (res->status_code.compare("200 OK") == 0) { return true; } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Append readings", res->status_code, resultPayload.str()); return false; } catch (exception& ex) { handleException(ex, "append reading"); } return false; } /** * Append multiple readings * * TODO implement a mechanism to force streamed or non-streamed mode */ bool StorageClient::readingAppend(const vector<Reading *>& readings) { #if INSTRUMENT struct timeval start, t1, t2; #endif if (m_streaming) { return streamReadings(readings); } // See if we should switch to stream mode struct timeval tmFirst, tmLast, dur; readings[0]->getUserTimestamp(&tmFirst); readings[readings.size()-1]->getUserTimestamp(&tmLast); timersub(&tmLast, &tmFirst, &dur); double timeSpan = dur.tv_sec + ((double)dur.tv_usec / 1000000); double rate = 
(double)readings.size() / timeSpan; // Stream functionality disabled #if ENABLE_STREAMING if (rate > STREAM_THRESHOLD) { m_logger->info("Reading rate %.1f readings per second above threshold, attmempting to switch to stream mode", rate); if (openStream()) { m_logger->info("Successfully switch to stream mode for readings"); return streamReadings(readings); } m_logger->warn("Failed to switch to streaming mode"); } #endif static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread try { std::thread::id thread_id = std::this_thread::get_id(); ostringstream ss; sto_mtx_client_map.lock(); m_seqnum_map[thread_id].fetch_add(1); ss << m_pid << "#" << thread_id << "_" << m_seqnum_map[thread_id].load(); sto_mtx_client_map.unlock(); SimpleWeb::CaseInsensitiveMultimap headers = {{"SeqNum", ss.str()}}; #if INSTRUMENT gettimeofday(&start, NULL); #endif ostringstream convert; convert << "{ \"readings\" : [ "; for (vector<Reading *>::const_iterator it = readings.cbegin(); it != readings.cend(); ++it) { if (it != readings.cbegin()) { convert << ", "; } convert << (*it)->toJSON(); } convert << " ] }"; #if INSTRUMENT gettimeofday(&t1, NULL); #endif auto res = this->getHttpClient()->request("POST", "/storage/reading", convert.str(), headers); #if INSTRUMENT gettimeofday(&t2, NULL); #endif if (res->status_code.compare("200 OK") == 0) { #if INSTRUMENT struct timeval tm; timersub(&t1, &start, &tm); double buildTime, requestTime; buildTime = tm.tv_sec + ((double)tm.tv_usec / 1000000); timersub(&t2, &t1, &tm); requestTime = tm.tv_sec + ((double)tm.tv_usec / 1000000); m_logger->info("Appended %d readings in %.3f seconds. 
Took %.3f seconds to build request", readings.size(), requestTime, buildTime); m_logger->info("%.1f Readings per second, request building %.2f%% of time", readings.size() / (buildTime + requestTime), (buildTime * 100) / (requestTime + buildTime)); m_logger->info("Request block size %dK", strlen(convert.str().c_str())/1024); #endif return true; } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Append readings", res->status_code, resultPayload.str()); return false; } catch (exception& ex) { handleException(ex, "append readings"); } return false; } /** * Perform a generic query against the readings data * * @param query The query to execute * @return ResultSet The result of the query */ ResultSet *StorageClient::readingQuery(const Query& query) { try { ostringstream convert; convert << query.toJSON(); auto res = this->getHttpClient()->request("PUT", "/storage/reading/query", convert.str()); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); ResultSet *result = new ResultSet(resultPayload.str().c_str()); return result; } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Query readings", res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "query readings"); throw; } catch (exception* ex) { handleException(*ex, "query readings"); delete ex; throw exception(); } return 0; } /** * Perform a generic query against the readings data, * returning ReadingSet object * * @param query The query to execute * @return ReadingSet The result of the query */ ReadingSet *StorageClient::readingQueryToReadings(const Query& query) { try { ostringstream convert; convert << query.toJSON(); auto res = this->getHttpClient()->request("PUT", "/storage/reading/query", convert.str()); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); ReadingSet* 
result = new ReadingSet(resultPayload.str().c_str()); return result; } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Query readings", res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "query readings"); throw; } catch (exception* ex) { handleException(*ex, "query readings"); delete ex; throw exception(); } return 0; } /** * Retrieve a set of readings for sending on the northbound * interface of Fledge * * @param readingId The ID of the reading which should be the first one to send * @param count Maximum number if readings to return * @return ReadingSet The set of readings */ ReadingSet *StorageClient::readingFetch(const unsigned long readingId, const unsigned long count) { try { char url[256]; snprintf(url, sizeof(url), "/storage/reading?id=%lu&count=%lu", readingId, count); auto res = this->getHttpClient()->request("GET", url); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); ReadingSet *result = new ReadingSet(resultPayload.str()); return result; } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Fetch readings", res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "fetch readings"); throw; } catch (exception* ex) { handleException(*ex, "fetch readings"); delete ex; throw exception(); } return 0; } /** * Purge the readings by age * * @param age Number of hours old a reading has to be to be considered for purging * @param sent The ID of the last reading that was sent * @param purgeUnsent Flag to control if unsent readings should be purged * @return PurgeResult Data on the readings hat were purged */ PurgeResult StorageClient::readingPurgeByAge(unsigned long age, unsigned long sent, bool purgeUnsent) { try { char url[256]; snprintf(url, sizeof(url), "/storage/reading/purge?age=%ld&sent=%ld&flags=%s", age, sent, purgeUnsent ? 
"purge" : "retain"); auto res = this->getHttpClient()->request("PUT", url); ostringstream resultPayload; resultPayload << res->content.rdbuf(); if (res->status_code.compare("200 OK") == 0) { return PurgeResult(resultPayload.str()); } handleUnexpectedResponse("Purge by age", res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "purge readings by age"); throw; } catch (exception* ex) { handleException(*ex, "purge readings by age"); delete ex; throw exception(); } return PurgeResult(); } /** * Purge the readings by size * * @param size Desired maximum size of readings table * @param sent The ID of the last reading that was sent * @param purgeUnsent Flag to control if unsent readings should be purged * @return PurgeResult Data on the readings hat were purged */ PurgeResult StorageClient::readingPurgeBySize(unsigned long size, unsigned long sent, bool purgeUnsent) { try { char url[256]; snprintf(url, sizeof(url), "/storage/reading/purge?size=%ld&sent=%ld&flags=%s", size, sent, purgeUnsent ? 
"purge" : "retain"); auto res = this->getHttpClient()->request("PUT", url); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); return PurgeResult(resultPayload.str()); } } catch (exception& ex) { handleException(ex, "purge readings by size"); throw; } catch (exception* ex) { handleException(*ex, "purge readings by size"); delete ex; throw exception(); } return PurgeResult(); } /** * Purge the readings by asset name * * @param asset The name of the asset to purge * @return PurgeResult Data on the readings that were purged */ PurgeResult StorageClient::readingPurgeByAsset(const string& asset) { try { char url[256]; snprintf(url, sizeof(url), "/storage/reading/purge?asset=%s", asset.c_str()); auto res = this->getHttpClient()->request("PUT", url); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); return PurgeResult(resultPayload.str()); } } catch (exception& ex) { handleException(ex, "purge readings by size"); throw; } catch (exception* ex) { handleException(*ex, "purge readings by size"); delete ex; throw exception(); } return PurgeResult(); } /** * Query a table * * @param tablename The name of the table to query * @param query The query payload * @return ResultSet* The resultset of the query */ ResultSet *StorageClient::queryTable(const std::string& tableName, const Query& query) { return queryTable(DEFAULT_SCHEMA, tableName, query); } /** * Query a table * * @param schema The name of the schema to query * @param tablename The name of the table to query * @param query The query payload * @return ResultSet* The resultset of the query */ ResultSet *StorageClient::queryTable(const std::string& schema, const std::string& tableName, const Query& query) { try { ostringstream convert; convert << query.toJSON(); char url[128]; snprintf(url, sizeof(url), "/storage/schema/%s/table/%s/query", schema.c_str(), tableName.c_str()); auto res = 
this->getHttpClient()->request("PUT", url, convert.str()); ostringstream resultPayload; resultPayload << res->content.rdbuf(); if (res->status_code.compare("200 OK") == 0) { ResultSet *result = new ResultSet(resultPayload.str().c_str()); return result; } handleUnexpectedResponse("Query table", res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "query table %s", tableName.c_str()); throw; } catch (exception* ex) { handleException(*ex, "query table %s", tableName.c_str()); delete ex; throw exception(); } return 0; } /** * Query a table and return a ReadingSet pointer * * @param tablename The name of the table to query * @param query The query payload * @return ReadingSet* The resultset of the query as * ReadingSet class pointer */ ReadingSet* StorageClient::queryTableToReadings(const std::string& tableName, const Query& query) { try { ostringstream convert; convert << query.toJSON(); char url[128]; snprintf(url, sizeof(url), "/storage/table/%s/query", tableName.c_str()); auto res = this->getHttpClient()->request("PUT", url, convert.str()); ostringstream resultPayload; resultPayload << res->content.rdbuf(); if (res->status_code.compare("200 OK") == 0) { ReadingSet* result = new ReadingSet(resultPayload.str().c_str()); return result; } handleUnexpectedResponse("Query table", res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "query table %s to readings", tableName.c_str()); throw; } catch (exception* ex) { handleException(*ex, "query table %s to readings", tableName.c_str()); delete ex; throw exception(); } return 0; } /** * Insert data into an arbitrary table * * @param tableName The name of the table into which data will be added * @param values The values to insert into the table * @return int The number of rows inserted */ int StorageClient::insertTable(const string& tableName, const InsertValues& values) { return insertTable(DEFAULT_SCHEMA, tableName, values); } /** * Insert data into an 
arbitrary table * * @param schema The name of the schema to insert into * @param tableName The name of the table into which data will be added * @param values The values to insert into the table * @return int The number of rows inserted */ int StorageClient::insertTable(const string& schema, const string& tableName, const InsertValues& values) { try { ostringstream convert; convert << values.toJSON(); char url[128]; snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str()); auto res = this->getHttpClient()->request("POST", url, convert.str()); ostringstream resultPayload; resultPayload << res->content.rdbuf(); if (res->status_code.compare("200 OK") == 0 || res->status_code.compare("201 Created") == 0) { Document doc; doc.Parse(resultPayload.str().c_str()); if (doc.HasParseError()) { m_logger->info("POST result %s.", res->status_code.c_str()); m_logger->error("Failed to parse result of insertTable. %s. Document is %s", GetParseError_En(doc.GetParseError()), resultPayload.str().c_str()); return -1; } else if (doc.HasMember("message")) { m_logger->error("Failed to append table data: %s", doc["message"].GetString()); return -1; } return doc["rows_affected"].GetInt(); } handleUnexpectedResponse("Insert table", res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "insert into table %s", tableName.c_str()); throw; } return 0; } /** * Update data into an arbitrary table * * @param tableName The name of the table into which data will be added * @param values The values to insert into the table * @param where The conditions to match the updated rows * @param modifier Optional storage modifier * @return int The number of rows updated */ int StorageClient::updateTable(const string& tableName, const InsertValues& values, const Where& where, const UpdateModifier *modifier) { return updateTable(DEFAULT_SCHEMA, tableName, values, where, modifier); } /** * Update data into an arbitrary table * * @param schema The 
name of the schema into which data will be added
 * @param tableName	The name of the table into which data will be added
 * @param values	The values to update in the table
 * @param where		The conditions to match the updated rows
 * @param modifier	Optional storage modifier
 * @return int		The number of rows updated, -1 on error
 */
int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const Where& where, const UpdateModifier *modifier)
{
	// Touch the HTTP client once per process so that the per-thread
	// m_seqnum_map entry for this thread is created before first use
	static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread
	try {
		std::thread::id thread_id = std::this_thread::get_id();
		ostringstream ss;
		// Build a unique "pid#thread_seq" sequence number header so the
		// storage service can detect duplicate/replayed requests
		sto_mtx_client_map.lock();
		m_seqnum_map[thread_id].fetch_add(1);
		ss << m_pid << "#" << thread_id << "_" << m_seqnum_map[thread_id].load();
		sto_mtx_client_map.unlock();
		SimpleWeb::CaseInsensitiveMultimap headers = {{"SeqNum", ss.str()}};

		ostringstream convert;
		convert << "{ \"updates\" : [ {";
		if (modifier)
		{
			convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], ";
		}
		convert << "\"where\" : ";
		convert << where.toJSON();
		convert << ", \"values\" : ";
		convert << values.toJSON();
		convert << " }";
		convert << " ] }";
		char url[128];
		snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str());
		auto res = this->getHttpClient()->request("PUT", url, convert.str(), headers);
		if (res->status_code.compare("200 OK") == 0)
		{
			ostringstream resultPayload;
			resultPayload << res->content.rdbuf();
			Document doc;
			doc.Parse(resultPayload.str().c_str());
			if (doc.HasParseError())
			{
				m_logger->info("PUT result %s.", res->status_code.c_str());
				m_logger->error("Failed to parse result of updateTable. %s",
						GetParseError_En(doc.GetParseError()));
				return -1;
			}
			else if (doc.HasMember("message"))
			{
				// Storage-side error reported inside a 200 reply
				m_logger->error("Failed to update table data: %s",
						doc["message"].GetString());
				return -1;
			}
			return doc["rows_affected"].GetInt();
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Update table", tableName, res->status_code, resultPayload.str());
	} catch (exception& ex) {
		handleException(ex, "update table %s", tableName.c_str());
		throw;
	}
	return -1;
}

/**
 * Update data in an arbitrary table using the default schema
 *
 * @param tableName	The name of the table to update
 * @param values	The expressions to update in the table
 * @param where		The conditions to match the updated rows
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated
 */
int StorageClient::updateTable(const string& tableName, const ExpressionValues& values, const Where& where, const UpdateModifier *modifier)
{
	return updateTable(DEFAULT_SCHEMA, tableName, values, where, modifier);
}

/**
 * Update data into an arbitrary table
 *
 * @param schema	The name of the schema into which data will be added
 * @param tableName	The name of the table into which data will be added
 * @param values	The expressions to update into the table
 * @param where		The conditions to match the updated rows
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated, -1 on error
 */
int StorageClient::updateTable(const string& schema, const string& tableName, const ExpressionValues& values, const Where& where, const UpdateModifier *modifier)
{
	// Touch the HTTP client so the per-thread m_seqnum_map entry exists
	static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread
	try {
		std::thread::id thread_id = std::this_thread::get_id();
		ostringstream ss;
		// Unique "pid#thread_seq" header for duplicate detection
		sto_mtx_client_map.lock();
		m_seqnum_map[thread_id].fetch_add(1);
		ss << m_pid << "#" << thread_id << "_" << m_seqnum_map[thread_id].load();
		sto_mtx_client_map.unlock();
		SimpleWeb::CaseInsensitiveMultimap headers = {{"SeqNum", ss.str()}};

		ostringstream convert;
		convert << "{ \"updates\" : [ {";
		if (modifier)
		{
			convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], ";
		}
		convert << "\"where\" : ";
		convert << where.toJSON();
		convert << ", \"expressions\" : ";
		convert << values.toJSON();
		convert << " }";
		convert << " ] }";
		char url[128];
		snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str());
		auto res = this->getHttpClient()->request("PUT", url, convert.str(), headers);
		if (res->status_code.compare("200 OK") == 0)
		{
			ostringstream resultPayload;
			resultPayload << res->content.rdbuf();
			Document doc;
			doc.Parse(resultPayload.str().c_str());
			if (doc.HasParseError())
			{
				m_logger->info("PUT result %s.", res->status_code.c_str());
				m_logger->error("Failed to parse result of updateTable. %s",
						GetParseError_En(doc.GetParseError()));
				return -1;
			}
			else if (doc.HasMember("message"))
			{
				m_logger->error("Failed to update table data: %s",
						doc["message"].GetString());
				return -1;
			}
			return doc["rows_affected"].GetInt();
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Update table", tableName, res->status_code, resultPayload.str());
	} catch (exception& ex) {
		handleException(ex, "update table %s", tableName.c_str());
		throw;
	}
	return -1;
}

/**
 * Update data in an arbitrary table using the default schema
 *
 * @param tableName	The name of the table to update
 * @param updates	The expression and condition pairs to update in the table
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated
 */
int StorageClient::updateTable(const string& tableName, vector<pair<ExpressionValues *, Where *>>& updates, const UpdateModifier *modifier)
{
	return updateTable(DEFAULT_SCHEMA, tableName, updates, modifier);
}

/**
 * Update data into an arbitrary table
 *
 * @param schema	The name of the schema into which data will be added
 * @param tableName	The name of the table into which data will be added
 * @param updates	The 
expressions and condition pairs to update in the table
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated, -1 on error
 */
int StorageClient::updateTable(const string& schema, const string& tableName, vector<pair<ExpressionValues *, Where *>>& updates, const UpdateModifier *modifier)
{
	// Touch the HTTP client so the per-thread m_seqnum_map entry exists
	static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread
	try {
		std::thread::id thread_id = std::this_thread::get_id();
		ostringstream ss;
		// Unique "pid#thread_seq" header for duplicate detection
		sto_mtx_client_map.lock();
		m_seqnum_map[thread_id].fetch_add(1);
		ss << m_pid << "#" << thread_id << "_" << m_seqnum_map[thread_id].load();
		sto_mtx_client_map.unlock();
		SimpleWeb::CaseInsensitiveMultimap headers = {{"SeqNum", ss.str()}};

		// Build one update object per (expressions, where) pair
		ostringstream convert;
		convert << "{ \"updates\" : [ ";
		for (vector<pair<ExpressionValues *, Where *>>::const_iterator it = updates.cbegin();
					 it != updates.cend(); ++it)
		{
			if (it != updates.cbegin())
			{
				convert << ", ";
			}
			convert << "{ ";
			if (modifier)
			{
				convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], ";
			}
			convert << "\"where\" : ";
			convert << it->second->toJSON();
			convert << ", \"expressions\" : ";
			convert << it->first->toJSON();
			convert << " }";
		}
		convert << " ] }";
		char url[128];
		snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str());
		auto res = this->getHttpClient()->request("PUT", url, convert.str(), headers);
		if (res->status_code.compare("200 OK") == 0)
		{
			ostringstream resultPayload;
			resultPayload << res->content.rdbuf();
			Document doc;
			doc.Parse(resultPayload.str().c_str());
			if (doc.HasParseError())
			{
				m_logger->info("PUT result %s.", res->status_code.c_str());
				m_logger->error("Failed to parse result of updateTable. %s",
						GetParseError_En(doc.GetParseError()));
				return -1;
			}
			else if (doc.HasMember("message"))
			{
				// Storage-side error reported inside a 200 reply
				m_logger->error("Failed to update table data: %s",
						doc["message"].GetString());
				return -1;
			}
			return doc["rows_affected"].GetInt();
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Update table", tableName, res->status_code, resultPayload.str());
	} catch (exception& ex) {
		handleException(ex, "update table %s", tableName.c_str());
		throw;
	}
	return -1;
}

/**
 * Update data in an arbitrary table using the default schema
 *
 * @param tableName	The name of the table to update
 * @param values	The values to update in the table
 * @param expressions	The expressions to update in the table
 * @param where		The conditions to match the updated rows
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated
 */
int StorageClient::updateTable(const string& tableName, const InsertValues& values, const ExpressionValues& expressions, const Where& where, const UpdateModifier *modifier)
{
	return updateTable(DEFAULT_SCHEMA, tableName, values, expressions, where, modifier);
}

/**
 * Update data into an arbitrary table
 *
 * @param schema	The name of the schema into which data will be added
 * @param tableName	The name of the table into which data will be added
 * @param values	The values to update in the table
 * @param expressions	The expressions to update in the table
 * @param where		The conditions to match the updated rows
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated, -1 on error
 */
int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const ExpressionValues& expressions, const Where& where, const UpdateModifier *modifier)
{
	try {
		ostringstream convert;

		// Single update object carrying both plain values and expressions
		convert << "{ \"updates\" : [ { ";
		if (modifier)
		{
			convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], ";
		}
		convert << "\"where\" : ";
		convert << where.toJSON();
		convert << ", \"values\" : ";
		convert << values.toJSON();
		convert << ", \"expressions\" : ";
		convert << expressions.toJSON();
		convert << " }";
		convert << " ] }";
		char url[128];
		snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str());
		auto res = this->getHttpClient()->request("PUT", url, convert.str());
		if (res->status_code.compare("200 OK") == 0)
		{
			ostringstream resultPayload;
			resultPayload << res->content.rdbuf();
			Document doc;
			doc.Parse(resultPayload.str().c_str());
			if (doc.HasParseError())
			{
				m_logger->info("PUT result %s.", res->status_code.c_str());
				m_logger->error("Failed to parse result of updateTable. %s",
						GetParseError_En(doc.GetParseError()));
				return -1;
			}
			else if (doc.HasMember("message"))
			{
				m_logger->error("Failed to update table data: %s",
						doc["message"].GetString());
				return -1;
			}
			return doc["rows_affected"].GetInt();
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Update table", tableName, res->status_code, resultPayload.str());
	} catch (exception& ex) {
		handleException(ex, "update table %s", tableName.c_str());
		throw;
	}
	return -1;
}

/**
 * Update data in an arbitrary table using the default schema
 *
 * @param tableName	The name of the table to update
 * @param values	The JSON Properties to update in the table
 * @param where		The conditions to match the updated rows
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated
 */
int StorageClient::updateTable(const string& tableName, const JSONProperties& values, const Where& where, const UpdateModifier *modifier)
{
	return updateTable(DEFAULT_SCHEMA, tableName, values, where, modifier);
}

/**
 * Update data into an arbitrary table
 *
 * @param schema	The name of the schema into which data will be added
 * @param tableName	The name of the table into which data will be added
 * @param values	The JSON Properties to update in the table
 * @param where		The conditions to match the updated rows
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated
 */
int
StorageClient::updateTable(const string& schema, const string& tableName, const JSONProperties& values, const Where& where, const UpdateModifier *modifier) { try { ostringstream convert; convert << "{ \"updates\" : [ {"; if (modifier) { convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ]"; } convert << "\"where\" : "; convert << where.toJSON(); convert << ", "; convert << values.toJSON(); convert << " }"; convert << " ] }"; char url[128]; snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str()); auto res = this->getHttpClient()->request("PUT", url, convert.str()); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); Document doc; doc.Parse(resultPayload.str().c_str()); if (doc.HasParseError()) { m_logger->info("PUT result %s.", res->status_code.c_str()); m_logger->error("Failed to parse result of updateTable. %s", GetParseError_En(doc.GetParseError())); return -1; } else if (doc.HasMember("message")) { m_logger->error("Failed to update table data: %s", doc["message"].GetString()); return -1; } return doc["rows_affected"].GetInt(); } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Update table", tableName, res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "update table %s", tableName.c_str()); throw; } return -1; } /** * Update data into an arbitrary table * * @param tableName The name of the table into which data will be added * @param values The values to insert into the table * @param jsonProp The JSON Properties to update * @param where The conditions to match the updated rows * @param modifier Optional update modifier * @return int The number of rows updated */ int StorageClient::updateTable(const string& tableName, const InsertValues& values, const JSONProperties& jsonProp, const Where& where, const UpdateModifier *modifier) { return updateTable(DEFAULT_SCHEMA, 
tableName, values, jsonProp, where, modifier); } /** * Update data into an arbitrary table * * @param schema The name of the schema into which data will be added * @param tableName The name of the table into which data will be added * @param values The values to insert into the table * @param jsonProp The JSON Properties to update * @param where The conditions to match the updated rows * @param modifier Optional update modifier * @return int The number of rows updated */ int StorageClient::updateTable(const string& schema, const string& tableName, const InsertValues& values, const JSONProperties& jsonProp, const Where& where, const UpdateModifier *modifier) { try { ostringstream convert; convert << "{ \"updates\" : [ {"; if (modifier) { convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\", "; } convert << "\"where\" : "; convert << where.toJSON(); convert << ", \"values\" : "; convert << values.toJSON(); convert << ", "; convert << jsonProp.toJSON(); convert << " }"; convert << " ] }"; char url[128]; snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str()); auto res = this->getHttpClient()->request("PUT", url, convert.str()); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); Document doc; doc.Parse(resultPayload.str().c_str()); if (doc.HasParseError()) { m_logger->info("PUT result %s.", res->status_code.c_str()); m_logger->error("Failed to parse result of updateTable. 
%s", GetParseError_En(doc.GetParseError())); return -1; } else if (doc.HasMember("message")) { m_logger->error("Failed to update table data: %s", doc["message"].GetString()); return -1; } return doc["rows_affected"].GetInt(); } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Update table", tableName, res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "update table %s", tableName.c_str()); throw; } return -1; } /** * Delete from a table * * @param tablename The name of the table to delete from * @param query The query payload to match rows to delete * @return int The number of rows deleted */ int StorageClient::deleteTable(const std::string& tableName, const Query& query) { return deleteTable(DEFAULT_SCHEMA, tableName, query); } /** * Delete from a table * * @param schema The name of the schema to delete from * @param tablename The name of the table to delete from * @param query The query payload to match rows to delete * @return int The number of rows deleted */ int StorageClient::deleteTable(const std::string& schema, const std::string& tableName, const Query& query) { try { ostringstream convert; convert << query.toJSON(); char url[128]; snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str()); auto res = this->getHttpClient()->request("DELETE", url, convert.str()); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); Document doc; doc.Parse(resultPayload.str().c_str()); if (doc.HasParseError()) { m_logger->info("PUT result %s.", res->status_code.c_str()); m_logger->error("Failed to parse result of deleteTable. 
%s", GetParseError_En(doc.GetParseError())); return -1; } else if (doc.HasMember("message")) { m_logger->error("Failed to delete table data: %s", doc["message"].GetString()); return -1; } return doc["rows_affected"].GetInt(); } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Delete from table", tableName, res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "delete table date in %s", tableName.c_str()); throw; } return -1; } /** * Standard logging method for all interactions * * @param operation The operation being undertaken * @param table The name of the table * @param responseCode The HTTP response code * @param payload The payload in the response message */ void StorageClient::handleUnexpectedResponse(const char *operation, const string& table, const string& responseCode, const string& payload) { string op(operation); op += " "; op += table; handleUnexpectedResponse(op.c_str(), responseCode, payload); } /** * Standard logging method for all interactions * * @param operation The operation being undertaken * @param responseCode The HTTP response code * @param payload The payload in the response message */ void StorageClient::handleUnexpectedResponse(const char *operation, const string& responseCode, const string& payload) { Document doc; doc.Parse(payload.c_str()); if (!doc.HasParseError()) { if (doc.HasMember("message")) { m_logger->info("%s completed with result %s", operation, responseCode.c_str()); m_logger->error("%s: %s", operation, doc["message"].GetString()); } } else { m_logger->error("%s completed with result %s", operation, responseCode.c_str()); } } /** * Register interest for a Reading asset name * * @param assetName The asset name to register * for readings data notification * @param callbackUrl The callback URL to send readings data. * @return True on success, false otherwise. 
 */
bool StorageClient::registerAssetNotification(const string& assetName, const string& callbackUrl)
{
	try {
		ostringstream convert;

		// Payload simply carries the callback URL to invoke on new readings
		convert << "{ \"url\" : \"";
		convert << callbackUrl;
		convert << "\" }";
		auto res = this->getHttpClient()->request("POST",
							  "/storage/reading/interest/" + urlEncode(assetName),
							  convert.str());
		if (res->status_code.compare("200 OK") == 0)
		{
			return true;
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Register asset", assetName, res->status_code, resultPayload.str());
		m_logger->error("/storage/reading/interest/%s: %s",
				urlEncode(assetName).c_str(),
				res->status_code.c_str());
		return false;
	} catch (exception& ex) {
		// Best effort: log and return failure rather than propagating
		handleException(ex, "register asset '%s'", assetName.c_str());
	}
	return false;
}

/**
 * Unregister interest for a Reading asset name
 *
 * @param assetName	The asset name to unregister
 *			for readings data notification
 * @param callbackUrl	The callback URL provided in registration.
 * @return		True on success, false otherwise.
 */
bool StorageClient::unregisterAssetNotification(const string& assetName, const string& callbackUrl)
{
	try {
		ostringstream convert;

		convert << "{ \"url\" : \"";
		convert << callbackUrl;
		convert << "\" }";
		auto res = this->getHttpClient()->request("DELETE",
							  "/storage/reading/interest/" + urlEncode(assetName),
							  convert.str());
		if (res->status_code.compare("200 OK") == 0)
		{
			return true;
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Unregister asset", assetName, res->status_code, resultPayload.str());
		return false;
	} catch (exception& ex) {
		// Best effort: log and return failure rather than propagating
		handleException(ex, "unregister asset '%s'", assetName.c_str());
	}
	return false;
}

/**
 * Register interest for a table
 *
 * @param tableName	The table name to register for notification
 * @param key		The key of interest in the table
 * @param keyValues	The key values of interest
 * @param operation	The table operation of interest (insert/update/delete)
 * @param callbackUrl	The callback URL to send change data
 * @return		True on success, false otherwise.
 */
bool StorageClient::registerTableNotification(const string& tableName, const string& key, std::vector<std::string> keyValues,
					      const string& operation, const string& callbackUrl)
{
	try {
		// Build a JSON array of the key values; the address comparison
		// against keyValues.back() detects the last element so no
		// trailing comma is emitted.
		// NOTE(review): key values are not JSON-escaped - assumes they
		// contain no quotes/backslashes, confirm against callers
		ostringstream keyValuesStr;
		for (auto & s : keyValues)
		{
			keyValuesStr << "\"" << s << "\"";
			if (&s != &keyValues.back())
				keyValuesStr << ", ";
		}

		ostringstream convert;

		convert << "{ ";
		convert << "\"url\" : \"" << callbackUrl << "\", ";
		convert << "\"key\" : \"" << key << "\", ";
		convert << "\"values\" : [" << keyValuesStr.str() << "], ";
		convert << "\"operation\" : \"" << operation << "\" ";
		convert << "}";

		auto res = this->getHttpClient()->request("POST",
							  "/storage/table/interest/" + urlEncode(tableName),
							  convert.str());
		if (res->status_code.compare("200 OK") == 0)
		{
			return true;
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Register table", tableName, res->status_code, resultPayload.str());
		m_logger->error("POST /storage/table/interest/%s: %s",
				urlEncode(tableName).c_str(),
				res->status_code.c_str());
		return false;
	} catch (exception& ex) {
		// Best effort: log and return failure rather than propagating
		handleException(ex, "register table '%s'", tableName.c_str());
	}
	return false;
}

/**
 * Unregister interest for a table name
 *
 * @param tableName	The table name to unregister interest in
 * @param key		The key of interest in the table
 * @param keyValues	The key values of interest
 * @param operation	The table operation of interest (insert/update/delete)
 * @param callbackUrl	The callback URL to send change data
 * @return		True on success, false otherwise.
*/ bool StorageClient::unregisterTableNotification(const string& tableName, const string& key, std::vector<std::string> keyValues, const string& operation, const string& callbackUrl) { try { ostringstream keyValuesStr; for (auto & s : keyValues) { keyValuesStr << "\"" << s << "\""; if (&s != &keyValues.back()) keyValuesStr << ", "; } ostringstream convert; convert << "{ "; convert << "\"url\" : \"" << callbackUrl << "\", "; convert << "\"key\" : \"" << key << "\", "; convert << "\"values\" : [" << keyValuesStr.str() << "], "; convert << "\"operation\" : \"" << operation << "\" "; convert << "}"; auto res = this->getHttpClient()->request("DELETE", "/storage/table/interest/" + urlEncode(tableName), convert.str()); if (res->status_code.compare("200 OK") == 0) { return true; } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Unregister table", tableName, res->status_code, resultPayload.str()); m_logger->error("DELETE /storage/table/interest/%s: %s", urlEncode(tableName).c_str(), res->status_code.c_str()); return false; } catch (exception& ex) { handleException(ex, "unregister table '%s'", tableName.c_str()); } return false; } /* * Attempt to open a streaming connection to the storage service. We use a REST API * call to create the stream. If successful this call will return a port and a token * to use when sending data via the stream. * * @return bool Return true if the stream was setup */ bool StorageClient::openStream() { try { auto res = this->getHttpClient()->request("POST", "/storage/reading/stream"); m_logger->info("POST /storage/reading/stream returned: %s", res->status_code.c_str()); if (res->status_code.compare("200 OK") == 0) { ostringstream resultPayload; resultPayload << res->content.rdbuf(); Document doc; doc.Parse(resultPayload.str().c_str()); if (doc.HasParseError()) { m_logger->info("POST result %s.", res->status_code.c_str()); m_logger->error("Failed to parse result of createStream. %s. 
Document is %s", GetParseError_En(doc.GetParseError()), resultPayload.str().c_str()); return false; } else if (doc.HasMember("message")) { m_logger->error("Failed to switch to stream mode: %s", doc["message"].GetString()); return false; } int port, token; if ((!doc.HasMember("port")) || (!doc.HasMember("token"))) { m_logger->error("Missing items in stream creation response"); return false; } port = doc["port"].GetInt(); token = doc["token"].GetInt(); if ((m_stream = socket(AF_INET, SOCK_STREAM, 0)) == -1) { m_logger->error("Unable to create socket"); return false; } struct sockaddr_in serv_addr; hostent *server; if ((server = gethostbyname(m_host.c_str())) == NULL) { m_logger->error("Unable to resolve hostname for reading stream: %s", m_host.c_str()); return false; } bzero((char *) &serv_addr, sizeof(serv_addr)); serv_addr.sin_family = AF_INET; bcopy((char *)server->h_addr, (char *)&serv_addr.sin_addr.s_addr, server->h_length); serv_addr.sin_port = htons(port); if (connect(m_stream, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) { Logger::getLogger()->warn("Unable to connect to storage streaming server: %s, %d", m_host.c_str(), port); return false; } RDSConnectHeader conhdr; conhdr.magic = RDS_CONNECTION_MAGIC; conhdr.token = token; if (write(m_stream, &conhdr, sizeof(conhdr)) != sizeof(conhdr)) { Logger::getLogger()->warn("Failed to write connection header: %s", strerror(errno)); return false; } m_streaming = true; m_logger->info("Storage stream succesfully created"); return true; } ostringstream resultPayload; resultPayload << res->content.rdbuf(); handleUnexpectedResponse("Create reading stream", res->status_code, resultPayload.str()); return false; } catch (exception& ex) { handleException(ex, "create reading stream"); } m_logger->error("Fallen through!"); return false; } /** * Stream a set of readings to the storage service. 
* * The stream uses a TCP connection to the storage system, it sends * blocks of readings to the storage engine and bypasses the usual * JSON conversion and imoprtantly parsing on the storage system * side. * * A block of readings is introduced by a block header, the block * header contains a magic number, the block number and the count * of the number of readings in a block. * * Each reading within the block is preceeded by a reading header * that contains a magic number, a reading number within the block, * The length of the asset name for the reading, the length of the * payload within the reading. The reading itself follows the herader * and consists of the timestamp as a binary timeval structure, the name * of the asset, including the null terminator. If the asset name length * is 0 then no asset name is sent and the name of the asset is the same * as the previous asset in the block. Following this the paylod is included. * * Each block is sent to the storage layer in a number of chunks rather * that a single write per block. The implementation make use of the * Linux scatter/gather IO calls to reduce the number of copies of data * that are required. * * Currently there is no acknowledement handling as TCP is used as the underlying * transport and the TCP acknowledgement is assumed to be a good enough * indication of delivery. 
* * TODO Deal with acknowledgements, add error checking/recovery * * @param readings The readings to stream * @return bool True if the readings have been sent */ bool StorageClient::streamReadings(const std::vector<Reading *> & readings) { RDSBlockHeader blkhdr; RDSReadingHeader rdhdrs[STREAM_BLK_SIZE]; register RDSReadingHeader *phdr; struct iovec iovs[STREAM_BLK_SIZE * 4], *iovp; string payloads[STREAM_BLK_SIZE]; struct timeval tm[STREAM_BLK_SIZE]; ssize_t n, length = 0; string lastAsset; if (!m_streaming) { m_logger->warn("Attempt to send data via a storage stream when streaming is not setup"); return false; } /* * Assemble and write the block header. This header contains information * to synchronise the blocks of data and also the number of readings * to expect within the block. */ blkhdr.magic = RDS_BLOCK_MAGIC; blkhdr.blockNumber = m_readingBlock++; blkhdr.count = readings.size(); if ((n = write(m_stream, &blkhdr, sizeof(blkhdr))) != sizeof(blkhdr)) { if (errno == EPIPE || errno == ECONNRESET) { Logger::getLogger()->error("Storage service has closed stream unexpectedly"); m_streaming = false; } else { Logger::getLogger()->error("Failed to write block header: %s", strerror(errno)); } return false; } /* * Use the writev scatter/gather interface to send the reading headers and reading data. * We sent chunks of data in order to allow the parallel sending and unpacking process * at the two ends. The chunk size is STREAM_BLK_SIZE readings. 
*/ iovp = iovs; phdr = rdhdrs; int offset = 0; for (int i = 0; i < readings.size(); i++) { phdr->magic = RDS_READING_MAGIC; phdr->readingNo = i; string assetCode = readings[i]->getAssetName(); if (i > 0 && assetCode.compare(lastAsset) == 0) { // Asset name is unchanged so don't send it phdr->assetLength = 0; } else { // Asset name has changed or this is the first asset in the block lastAsset = assetCode; phdr->assetLength = assetCode.length() + 1; } // Always generate the JSON variant of the data points and send payloads[offset] = readings[i]->getDatapointsJSON(); phdr->payloadLength = payloads[offset].length() + 1; // Add the reading header iovp->iov_base = phdr; iovp->iov_len = sizeof(RDSReadingHeader); length += iovp->iov_len; iovp++; // Reading user timestamp readings[i]->getUserTimestamp(&tm[offset]); iovp->iov_base = &tm[offset]; iovp->iov_len = sizeof(struct timeval); length += iovp->iov_len; iovp++; // If the asset code has changed than add that if (phdr->assetLength) { iovp->iov_base = (void *)(readings[i]->getAssetName().c_str()); // Cast away const due to iovec definition iovp->iov_len = phdr->assetLength; length += iovp->iov_len; iovp++; } // Add the data points themselves iovp->iov_base = (void *)(payloads[offset].c_str()); // Cast away const due to iovec definition iovp->iov_len = phdr->payloadLength; length += iovp->iov_len; iovp++; offset++; if (offset == STREAM_BLK_SIZE - 1) { if (iovp - iovs > STREAM_BLK_SIZE * 4) Logger::getLogger()->error("Too many iov blocks %d", iovp - iovs); // Send a chunk of readings in the block n = writev(m_stream, (const iovec *)iovs, iovp - iovs); if (n == -1) { if (errno == EPIPE || errno == ECONNRESET) { Logger::getLogger()->error("Stream has been closed by the storage service"); m_streaming = false; } Logger::getLogger()->error("Write of block %d filed: %s", m_readingBlock - 1, strerror(errno)); return false; } else if (n < length) { Logger::getLogger()->error("Write of block short, %d < %d: %s", n, length, 
strerror(errno)); return false; } else if (n > length) { Logger::getLogger()->fatal("Long write %d < %d", length, n); } offset = 0; length = 0; iovp = iovs; phdr = rdhdrs; } else { phdr++; } } if (length) // Remaining data to be sent to finish the block { n = writev(m_stream, (const iovec *)iovs, iovp - iovs); if (n == -1) { if (errno == EPIPE || errno == ECONNRESET) { Logger::getLogger()->error("Stream has been closed by the storage service"); m_streaming = false; } Logger::getLogger()->error("Write of block %d filed: %s", m_readingBlock - 1, strerror(errno)); return false; } else if (n < length) { Logger::getLogger()->error("Write of block short, %d < %d: %s", n, length, strerror(errno)); return false; } else if (n > length) { Logger::getLogger()->fatal("Long write %d < %d", length, n); } } Logger::getLogger()->info("Written block of %d readings via streaming connection", readings.size()); return true; } /** * Handle exceptions encountered when communicating to the storage system * * @param ex The exception we are handling */ void StorageClient::handleException(const exception& ex, const char *operation, ...) 
{
	char buf[EXCEPTION_BUFFER_SIZE];
	va_list ap;
	va_start(ap, operation);
	vsnprintf(buf, sizeof(buf), operation, ap);
	va_end(ap);

	// Firstly deal with not flooding the log with repeated exceptions:
	// log the first occurrence, then only every m_backoff-th repeat,
	// doubling the backoff up to SC_MAX_BACKOFF.
	const char *what = ex.what();
	if (m_lastException.empty())	// First exception
	{
		m_lastException = what;
		m_exRepeat = 0;
		m_backoff = SC_INITIAL_BACKOFF;
		m_logger->error("Failed to %s: %s", buf, m_lastException.c_str());
	}
	else if (m_lastException.compare(what) == 0)
	{
		// Same exception as last time: count it and only log periodically
		m_exRepeat++;
		if ((m_exRepeat % m_backoff) == 0)
		{
			if (m_backoff < SC_MAX_BACKOFF)
				m_backoff *= 2;
			m_logger->error("Storage client repeated failure: %s", m_lastException.c_str());
		}
	}
	else
	{
		// New exception: summarise the previous one and reset the backoff
		m_logger->error("Storage client failure: %s repeated %d times", m_lastException.c_str(), m_exRepeat);
		m_backoff = SC_INITIAL_BACKOFF;
		m_lastException = what;
		m_logger->error("Failed to %s: %s", buf, m_lastException.c_str());
	}

	// Now implement some recovery strategies
	if (m_lastException.compare("Connection refused") == 0)
	{
		// This is probably because the storage service has gone down
		if (m_management)
		{
			// Get a handle on the storage layer
			ServiceRecord storageRecord("Fledge Storage");
			if (!m_management->getService(storageRecord))
			{
				m_logger->fatal("Unable to find a storage service from service registry, exiting...");
				exit(1);
			}
			// NOTE(review): this appends the new address:port to the
			// m_urlbase stream rather than replacing its contents -
			// looks like it should reset the stream first, confirm
			m_urlbase << storageRecord.getAddress() << ":" << storageRecord.getPort();
		}
		if (m_exRepeat >= SC_INITIAL_BACKOFF * 2)
		{
			// We clearly tried to recover a number of times without success, simply exit at this stage
			m_logger->fatal("Storage service appears to have failed and unable to connect to core, exiting...");
			exit(1);
		}
	}
}

/**
 * Create a storage schema by POSTing the given payload to the
 * storage service.
 *
 * @param payload	The JSON schema definition to post
 * @return		True on success, false otherwise
 */
bool StorageClient::createSchema(const std::string& payload)
{
	try {
		auto res = this->getHttpClient()->request("POST", "/storage/schema", payload.c_str());
		if (res->status_code.compare("200 OK") == 0)
		{
			return true;
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Post Storage Schema", res->status_code, resultPayload.str());
		return false;
	} catch (exception& ex) {
		// Best effort: log and return failure rather than propagating
		handleException(ex, "post storage schema");
	}
	return false;
}

/**
 * Update data into an arbitrary table
 *
 * @param schema	The name of the schema into which data will be added
 * @param tableName	The name of the table into which data will be added
 * @param updates	The values and condition pairs to update in the table
 * @param modifier	Optional update modifier
 * @return int		The number of rows updated, -1 on error
 */
int StorageClient::updateTable(const string& schema, const string& tableName, std::vector<std::pair<InsertValue*, Where*> >& updates, const UpdateModifier *modifier)
{
	// Touch the HTTP client so the per-thread m_seqnum_map entry exists
	static HttpClient *httpClient = this->getHttpClient(); // to initialize m_seqnum_map[thread_id] for this thread
	try {
		std::thread::id thread_id = std::this_thread::get_id();
		ostringstream ss;
		// Unique "pid#thread_seq" header for duplicate detection
		sto_mtx_client_map.lock();
		m_seqnum_map[thread_id].fetch_add(1);
		ss << m_pid << "#" << thread_id << "_" << m_seqnum_map[thread_id].load();
		sto_mtx_client_map.unlock();
		SimpleWeb::CaseInsensitiveMultimap headers = {{"SeqNum", ss.str()}};

		// Build one update object per (value, where) pair
		ostringstream convert;
		convert << "{ \"updates\" : [ ";
		for (vector<pair<InsertValue *, Where *>>::const_iterator it = updates.cbegin();
					 it != updates.cend(); ++it)
		{
			if (it != updates.cbegin())
			{
				convert << ", ";
			}
			convert << "{ ";
			if (modifier)
			{
				convert << "\"modifiers\" : [ \"" << modifier->toJSON() << "\" ], ";
			}
			convert << "\"where\" : ";
			convert << it->second->toJSON();
			convert << ", \"values\" : ";
			convert << " { " << it->first->toJSON() << " } ";
			convert << " }";
		}
		convert << " ] }";
		char url[128];
		snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str());
		auto res = this->getHttpClient()->request("PUT", url, convert.str(), headers);
		if (res->status_code.compare("200 OK") == 0)
		{
			ostringstream resultPayload;
			resultPayload << res->content.rdbuf();
			Document doc;
			doc.Parse(resultPayload.str().c_str());
			if (doc.HasParseError())
			{
				m_logger->info("PUT result %s.", res->status_code.c_str());
				m_logger->error("Failed to parse result of updateTable. %s",
						GetParseError_En(doc.GetParseError()));
				return -1;
			}
			else if (doc.HasMember("message"))
			{
				// Storage-side error reported inside a 200 reply
				m_logger->error("Failed to update table data: %s",
						doc["message"].GetString());
				return -1;
			}
			return doc["rows_affected"].GetInt();
		}
		ostringstream resultPayload;
		resultPayload << res->content.rdbuf();
		handleUnexpectedResponse("Update table", tableName, res->status_code, resultPayload.str());
	} catch (exception& ex) {
		handleException(ex, "update table %s", tableName.c_str());
		throw;
	}
	return -1;
}

/**
 * Update data in an arbitrary table using the default schema
 *
 * @param tableName	The name of the table to update
 * @param updates	The values and condition pairs to update in the table
 * @param modifier	Optional storage modifier
 * @return int		The number of rows updated
 */
int StorageClient::updateTable(const string& tableName, std::vector<std::pair<InsertValue*, Where*> >& updates, const UpdateModifier *modifier)
{
	return updateTable(DEFAULT_SCHEMA, tableName, updates, modifier);
}

/**
 * Insert data into an arbitrary table using the default schema
 *
 * @param tableName	The name of the table into which data will be added
 * @param values	The values to insert into the table
 * @return int		The number of rows inserted
 */
int StorageClient::insertTable(const string& tableName, const std::vector<InsertValues>& values)
{
	return insertTable(DEFAULT_SCHEMA, tableName, values);
}

/**
 * Insert data into an arbitrary table
 *
 * @param schema	The name of the schema to insert into
 * @param tableName	The name of the table into which data will be added
 * @param values	The values to insert into the table
 * @return int		The number of rows inserted
 */
int StorageClient::insertTable(const string& schema, const string& tableName, const std::vector<InsertValues>& values)
{
	try {
		ostringstream convert;

		convert << "{ \"inserts\": [" ;
		for (std::vector<InsertValues>::const_iterator it = values.cbegin();
				 it != values.cend(); ++it)
		{
			if (it != values.cbegin())
			{
				convert
<< ", "; } convert << it->toJSON() ; } convert << "]}"; char url[1000]; snprintf(url, sizeof(url), "/storage/schema/%s/table/%s", schema.c_str(), tableName.c_str()); auto res = this->getHttpClient()->request("POST", url, convert.str()); ostringstream resultPayload; resultPayload << res->content.rdbuf(); if (res->status_code.compare("200 OK") == 0 || res->status_code.compare("201 Created") == 0) { Document doc; doc.Parse(resultPayload.str().c_str()); if (doc.HasParseError()) { m_logger->info("POST result %s.", res->status_code.c_str()); m_logger->error("Failed to parse result of insertTable. %s. Document is %s", GetParseError_En(doc.GetParseError()), resultPayload.str().c_str()); return -1; } else if (doc.HasMember("rows_affected")) { return doc["rows_affected"].GetInt(); } } handleUnexpectedResponse("Insert table", res->status_code, resultPayload.str()); } catch (exception& ex) { handleException(ex, "insert into table %s", tableName.c_str()); throw; } return 0; } ================================================ FILE: C/common/string_utils.cpp ================================================ /* * Fledge utilities functions for handling JSON document * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli, Massimiliano Pinto */ #include <iostream> #include <string> #include "string_utils.h" #include <logger.h> #include <stdio.h> #include <string.h> using namespace std; /** * Search and replace a string * * @param out StringToManage string in which apply the search and replacement * @param StringToSearch string to search and replace * @param StringToReplace substitution string * */ void StringReplace(std::string& StringToManage, const std::string& StringToSearch, const std::string& StringReplacement) { if (StringToManage.find(StringToSearch) != string::npos) { StringToManage.replace(StringToManage.find(StringToSearch), StringToSearch.length(), StringReplacement); } } /** * Search and replace all the occurances of 
a string * * @param out StringToManage string in which apply the search and replacement * @param StringToSearch string to search and replace * @param StringToReplace substitution string * */ void StringReplaceAll(std::string& StringToManage, const std::string& StringToSearch, const std::string& StringReplacement) { while (StringToManage.find(StringToSearch) != string::npos) { StringReplace(StringToManage,StringToSearch, StringReplacement); } } /** * Removes the last level of the hierarchy * */ std::string evaluateParentPath(const std::string& path, char separator) { std::string parent; parent = path; if (parent.length() > 1) { if (parent.find(separator) != string::npos) { while (parent.back() != separator) { parent.erase(parent.size() - 1); } if (parent.back() == separator) { parent.erase(parent.size() - 1); } } } return parent; } /** * Extract last level of the hierarchy * */ std::string extractLastLevel(const std::string& path, char separator) { std::string level; std::string tmpPath; char end_char; tmpPath = path; if (tmpPath.length() > 0) { if (tmpPath.find(separator) != string::npos) { end_char = tmpPath.back(); while (end_char != separator) { level.insert(0, 1, end_char); tmpPath.erase(tmpPath.size() - 1); end_char = tmpPath.back(); } } else { level = path; } } return level; } /** * Removes slash when not needed, at the beggining and at the end, * substitutes // with / * * @param stringToFix string to handle * */ std::string StringSlashFix(const std::string& stringToFix) { std::string stringFixed; stringFixed = stringToFix; if (!stringFixed.empty()) { char singleChar; // Remove first char if '/' for (singleChar = stringFixed.front() ; singleChar == '/' ; singleChar = stringFixed.front()) { stringFixed.erase(0, 1); } // Remove last char if '/' for (singleChar = stringFixed.back() ; singleChar == '/' ; singleChar = stringFixed.back()) { stringFixed.pop_back(); } // Substitute // with / while (stringFixed.find("//") != string::npos) { StringReplace(stringFixed, 
"//", "/"); } } return stringFixed; } /** * Strips Line feed and/or carriage return * * @param StringToManage The string to strip */ void StringStripCRLF(std::string& StringToManage) { string::size_type pos = 0; while ((pos = StringToManage.find ('\r',pos)) != string::npos) { StringToManage.erase ( pos, 1 ); } pos = 0; while ((pos = StringToManage.find ('\n',pos)) != string::npos) { StringToManage.erase ( pos, 1 ); } } /** * Stripes " from the string * */ void StringStripQuotes(std::string& StringToManage) { if ( ! StringToManage.empty()) { StringReplaceAll(StringToManage, "\"", ""); } } /** * Removes all the white spaces from a string * */ string StringStripWhiteSpacesAll(const std::string& original) { string output; output = original; for (size_t i = 0; i < output.length(); ) { if (isspace(output[i])) { output.erase(i, 1); } else { i++; } } return (output); } /** * Removes all the spaces at both ends of a string and * removes all the white spaces except 1 space * */ string StringStripWhiteSpacesExtra(const std::string& original) { int cSpace; string output; output = StringRTrim(StringLTrim(original)); cSpace = 0; for (size_t i = 0; i < output.length(); ) { if (output[i] == ' ') { cSpace++; if (cSpace > 1) { output.erase(i, 1); } else { i++; } } else { if (isspace(output[i])) { output.erase(i, 1); } else { i++; cSpace = 0; } } } return (output); } /** * URL-encode a given string * * @param s Input string that is to be URL-encoded * @return URL-encoded output string */ string urlEncode(const string &s) { ostringstream escaped; escaped.fill('0'); escaped << hex; for (string::const_iterator i = s.begin(), n = s.end(); i != n; ++i) { string::value_type c = (*i); // Keep alphanumeric and other accepted characters intact if (isalnum(c) || c == '-' || c == '_' || c == '.' 
|| c == '~') { escaped << c; continue; } // Any other characters are percent-encoded escaped << uppercase; escaped << '%' << setw(2) << int((unsigned char) c); escaped << nouppercase; } return escaped.str(); } /** * Check if a char is an hex value * * @param c The input char * @return True with hex value * false otherwise */ static inline bool ishex (const char c) { if (isdigit(c) || c=='A' || c=='B' || c=='C' || c=='D' || c=='E' || c=='F') { return true; } else { return false; } } /** * URL decode of a given string * * @param name The string to decode * @return The URL decoded string * * In case of decoding errors the routine returns * current decoded string */ string urlDecode(const std::string& name) { std::string decoded(name); char* s = (char *)name.c_str(); char* dec = (char *)decoded.c_str(); char* o; const char* end = s + name.length(); int c; for (o = dec; s <= end; o++) { c = *s++; if (c == '+') { c = ' '; } else if (c == '%' && (!ishex(*s++) || !ishex(*s++) || !sscanf(s - 2, "%2x", &c))) { break; } if (dec) { *o = c; } } return string(dec); } /** * Escape all double quotes characters in the string * * @param str The string to escape */ void StringEscapeQuotes(std::string& str) { for (size_t i = 0; i < str.length(); i++) { if (str[i] == '\"' && (i == 0 || str[i-1] != '\\')) { str.replace(i, 1, "\\\""); } } } /** * Remove space at both ends of a string */ char *trim(char *str) { char *ptr; while (*str && *str == ' ') str++; ptr = str + strlen(str) - 1; while (ptr > str && *ptr == ' ') { *ptr = 0; ptr--; } return str; } /** * Remove spaces at the left side of a string */ std::string StringLTrim(const std::string& str) { string output; size_t pos = str.find_first_not_of(" "); if (pos == std::string::npos) output = ""; else output = str.substr(pos); return (output); } /** * Remove spaces at the right side of a string */ std::string StringRTrim(const std::string& str) { string output; size_t pos = str.find_last_not_of(" "); if (pos == std::string::npos) output 
= ""; else output = str.substr(0, pos + 1); return (output); } /** * Remove spaces at both ends of a string */ std::string StringTrim(const std::string& str) { return StringRTrim(StringLTrim(str)); } /** * Evaluates if the input string is a regular expression */ bool IsRegex(const string &str) { size_t nChar; nChar = strcspn(str.c_str(), "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_"); return (nChar != 0); } /** * Return a new string that extracts from the passed in string either side * of a position within the string. * * @param str The string to return a portion of * @param pos The position around which to extract a portion * @param after The number of characters after the position to return, defaults to 30 if omitted * @param before The number of characters before the position to return, defaults to 10 */ std::string StringAround(const std::string& str, unsigned int pos, unsigned int after, unsigned int before) { size_t start = pos > before ? (pos - before) : 0; size_t len = before + after; return str.substr(start, len); } /** * Search and duplicate all the occurances of a string * * @param out StringToManage string in which apply the search * @param StringToSearch string to search * @param StringToChange substitution string * */ void StringReplaceAllEx(std::string& StringToManage, const std::string& StringToSearch, const std::string& StringToChange) { size_t pos = 0; while ((pos = StringToManage.find(StringToSearch, pos)) != std::string::npos) { StringToManage.replace(pos, StringToSearch.length(), StringToChange); pos += StringToChange.length(); // Move past the last replaced substring } } /** * Escape double quotes, forward and backword slash * * @param str The string to escape * @return The escaped string */ std::string escape(const std::string& str) { size_t pos = 0; if (str.find("\"", pos) == std::string::npos && str.find("\\", pos) == std::string::npos && str.find("/", pos) == std::string::npos) return str; //return if none of the 
following character exists '"' , "\" , "/" std::string rval; int bscount = 0; for (size_t i = 0; i < str.length(); i++) { if (str[i] == '\\') { if (i + 1 < str.length() && (str[i + 1] == '"' || str[i + 1] == '\\' || str[i + 1] == '/' || str[i-1] == '\\')) { rval += '\\'; } else { rval += "\\\\"; } bscount++; } else if (str[i] == '\"') { if ((bscount & 1) == 0) // not already escaped { rval += "\\"; // Add escape of " } rval += str[i]; bscount = 0; } else { rval += str[i]; bscount = 0; } } return rval; } ================================================ FILE: C/common/where.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2018 OSIsoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <where.h> #include <string> #include <sstream> #include <iostream> using namespace std; /** * Where clause destructor */ Where::~Where() { if (m_or) { delete m_or; } if (m_and) { delete m_and; } } /** * Return the JSON payload for a where clause */ const string Where::toJSON() const { ostringstream json; json << "{ \"column\" : \"" << m_column << "\", "; json << "\"condition\" : \""; switch (m_condition) { case Older: json << "older"; break; case Newer: json << "newer"; break; case Equals: json << "="; break; case NotEquals: json << "!="; break; case LessThan: json << "<"; break; case GreaterThan: json << ">"; break; case In: json << "in"; break; case IsNull: json << "isnull"; break; case NotNull: json << "notnull"; break; } json << "\""; if (m_condition != IsNull && m_condition != NotNull) { json << ", "; if ( (m_condition == Older) || (m_condition == Newer) ) { json << "\"value\" : " << m_value << ""; } else if (m_condition != In) { json << "\"value\" : \"" << m_value << "\""; } else { json << "\"value\" : ["; for (auto v = m_in.begin(); v != m_in.end(); ++v) { json << "\"" << *v << "\""; if (next(v, 1) != m_in.end()) { json << ", "; } } json << "]"; } } if (m_and || m_or) { if (m_and) { json << 
", \"and\" : " << m_and->toJSON(); } if (m_or) { json << ", \"or\" : " << m_or->toJSON(); } } json << " }"; return json.str(); } ================================================ FILE: C/plugins/common/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(plugins-common-lib) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(NEEDED_FLEDGE_LIBS common-lib services-common-lib) set(LIBCURL_LIB -lcurl) set(BOOST_COMPONENTS system thread) # Late 2017 TODO: remove the following checks and always use std::regex if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_BOOST_REGEX") endif() endif() find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) # Find source files file(GLOB SOURCES *.cpp) # Include header files include_directories(include) include_directories(../../common/include) include_directories(../../services/common/include) include_directories(../../thirdparty/Simple-Web-Server) include_directories(../../thirdparty/rapidjson/include) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) target_link_libraries(${PROJECT_NAME} ${Boost_LIBRARIES} ${NEEDED_FLEDGE_LIBS} z ssl crypto) target_link_libraries(${PROJECT_NAME} ${LIBCURL_LIB}) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib) ================================================ FILE: C/plugins/common/http_sender.cpp ================================================ /* * Fledge HTTP Sender wrapper. 
* * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto, Mark Riddoch */ #include <http_sender.h> #include <unistd.h> #include <string_utils.h> #include <sstream> // for std::stringstream #include <fstream> #include <logger.h> #include <utils.h> #include <sys/stat.h> #include <sys/types.h> using namespace std; /** * Constructor */ HttpSender::HttpSender() { } /** * Destructor */ HttpSender::~HttpSender() { } /** * @brief Creates the '/logs/debug-trace' subdirectory in the Fledge data directory. * * This function ensures that both the 'logs' directory and the 'debug-trace' directory are created if they do not exist. */ bool HttpSender::createDebugTraceDirectory() { // Retrieve the 'logs' and 'debug-trace' directory paths std::string logsDir = getDataDir() + "/logs"; std::string debugTraceDir = logsDir + "/debug-trace"; // Ensure path consistency with getDebugTracePath(). Assert is commented out to prevent unexpected runtime interruptions in execution // std::assert(debugTraceDir == getDebugTracePath()); auto createDir = [](const std::string& dirPath) -> bool { struct stat dirInfo; if (stat(dirPath.c_str(), &dirInfo) == 0) { if (dirInfo.st_mode & S_IFDIR) { return true; // Directory exists } else { Logger::getLogger()->error("Path exists but is not a directory: %s", dirPath.c_str()); return false; } } else { // Directory does not exist, attempt to create it if (mkdir(dirPath.c_str(), 0755) == 0) { return true; // Success } else { return false; } } }; // Create the logs directory if it does not exist if (!createDir(logsDir)) { Logger::getLogger()->error("Failed to create directory: %s. 
'debug-trace' directory will not be created.", logsDir.c_str()); return false; } // Create the debug-trace directory if it does not exist if (!createDir(debugTraceDir)) { Logger::getLogger()->error("Failed to create 'debug-trace' directory: %s.", debugTraceDir.c_str()); return false; } return true; } /** * @brief Constructs the file path for the OMF log. * * @return A string representing the path to the OMF log file. */ std::string HttpSender::getOMFTracePath() { return getDebugTracePath() + "/omf.log"; } ================================================ FILE: C/plugins/common/include/http_sender.h ================================================ #ifndef _HTTP_SENDER_H #define _HTTP_SENDER_H /* * Fledge HTTP Sender wrapper. * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <string> #include <vector> #define HTTP_SENDER_USER_AGENT "Fledge http sender" #define HTTP_SENDER_DEFAULT_METHOD "GET" #define HTTP_SENDER_DEFAULT_PATH "/" class HttpSender { public: /** * Constructor: */ HttpSender(); // Destructor virtual ~HttpSender(); /** * Set a proxy server */ virtual void setProxy(const std::string& proxy) = 0; /** * HTTP(S) request: pass method and path, HTTP headers and POST/PUT payload. 
*/ virtual int sendRequest( const std::string& method = std::string(HTTP_SENDER_DEFAULT_METHOD), const std::string& path = std::string(HTTP_SENDER_DEFAULT_PATH), const std::vector<std::pair<std::string, std::string>>& headers = {}, const std::string& payload = std::string() ) = 0; virtual std::string getHostPort() = 0; virtual std::string getHTTPResponse() = 0; virtual unsigned int getMaxRetries() = 0; virtual void setAuthMethod (std::string& authMethod) = 0; virtual void setAuthBasicCredentials(std::string& authBasicCredentials) = 0; virtual void setMaxRetries (unsigned int retries) = 0; // OCS configurations virtual void setOCSNamespace (std::string& OCSNamespace) = 0; virtual void setOCSTenantId (std::string& OCSTenantId) = 0; virtual void setOCSClientId (std::string& OCSClientId) = 0; virtual void setOCSClientSecret (std::string& OCSClientSecret) = 0; virtual void setOCSToken (std::string& OCSToken) = 0; /** * @brief Constructs the file path for the OMF log. * * @return A string representing the path to the OMF log file. */ static std::string getOMFTracePath(); /** * @brief Creates the '/logs/debug-trace' subdirectory in the Fledge data directory. 
* */ static bool createDebugTraceDirectory(); }; /** * BadRequest exception */ class BadRequest : public std::exception { public: // Constructor with parameter BadRequest(const std::string& serverReply) { m_errmsg = serverReply; }; virtual const char *what() const throw() { return m_errmsg.c_str(); } private: std::string m_errmsg; }; /** * Unauthorized exception */ class Unauthorized : public std::exception { public: // Constructor with parameter Unauthorized (const std::string& serverReply) { m_errmsg = serverReply; }; virtual const char *what() const throw() { return m_errmsg.c_str(); } private: std::string m_errmsg; }; /** * Conflict exception */ class Conflict : public std::exception { public: // Constructor with parameter Conflict (const std::string& serverReply) { m_errmsg = serverReply; }; virtual const char *what() const throw() { return m_errmsg.c_str(); } private: std::string m_errmsg; }; #endif ================================================ FILE: C/plugins/common/include/libcurl_https.h ================================================ #ifndef _LIBCURL_HTTPS_H #define _LIBCURL_HTTPS_H /* * Fledge HTTP Sender wrapper. * * Copyright (c) 2019 Diamnomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ #include <string> #include <vector> #include <http_sender.h> #include <curl/curl.h> #include <fstream> using namespace std; class LibcurlHttps: public HttpSender { public: /** * Constructor: * pass host:port, connect & request timeouts, retry_sleep_Time, max_retry */ LibcurlHttps(const std::string& host_port, unsigned int connect_timeout = 0, unsigned int request_timeout = 0, unsigned int retry_sleep_Time = 1, unsigned int max_retry = 4); // Destructor ~LibcurlHttps(); /** * Set the proxy host and port */ void setProxy(const std::string& proxy); /** * HTTP(S) request: pass method and path, HTTP headers and POST/PUT payload. 
*/ int sendRequest( const std::string& method = std::string(HTTP_SENDER_DEFAULT_METHOD), const std::string& path = std::string(HTTP_SENDER_DEFAULT_PATH), const std::vector<std::pair<std::string, std::string>>& headers = {}, const std::string& payload = std::string() ); void setAuthMethod (std::string& authMethod) {m_authMethod = authMethod; } void setAuthBasicCredentials(std::string& authBasicCredentials) {m_authBasicCredentials = authBasicCredentials; } void setMaxRetries (unsigned int retries) {m_max_retry = retries; }; // OCS configurations void setOCSNamespace (std::string& OCSNamespace) {m_OCSNamespace = OCSNamespace; } void setOCSTenantId (std::string& OCSTenantId) {m_OCSTenantId = OCSTenantId; } void setOCSClientId (std::string& OCSClientId) {m_OCSClientId = OCSClientId; } void setOCSClientSecret (std::string& OCSClientSecret) {m_OCSClientSecret = OCSClientSecret; } void setOCSToken (std::string& OCSToken) {m_OCSToken = OCSToken; } std::string getHostPort() { return m_host_port; }; std::string getHTTPResponse() { return m_HTTPResponse; }; unsigned int getMaxRetries() { return m_max_retry; }; private: // Make private the copy constructor and operator= LibcurlHttps(const LibcurlHttps&); LibcurlHttps& operator=(LibcurlHttps const &); void setLibCurlOptions(CURL *sender, const std::string& path, const vector<pair<std::string, std::string>>& headers); void setTrace(); void resetTrace(); private: CURL *m_sender; std::string m_HTTPResponse; std::string m_host_port; unsigned int m_retry_sleep_time; // Seconds between each retry unsigned int m_max_retry; // Max number of retries in the communication std::string m_authMethod; // Authentication method to be used std::string m_authBasicCredentials; // Credentials is the base64 encoding of id and password joined by a single colon (:) struct curl_slist *m_chunk = NULL; unsigned int m_request_timeout; unsigned int m_connect_timeout; // OCS configurations std::string m_OCSNamespace; std::string m_OCSTenantId; std::string 
m_OCSClientId; std::string m_OCSClientSecret; std::string m_OCSToken; std::ofstream m_ofs; bool m_log; }; #endif ================================================ FILE: C/plugins/common/include/piwebapi.h ================================================ #ifndef _PIWEBAPI_H #define _PIWEBAPI_H /* * Fledge OSIsoft PI Web API integration. * * Copyright (c) 2020-2022 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ #include <string> using namespace std; #define TIMEOUT_CONNECT 10 #define TIMEOUT_REQUEST 10 #define RETRY_SLEEP_TIME 1 #define MAX_RETRY 3 #define URL_GET_VERSION "/piwebapi/system" /** * The PIWebAPI class. */ class PIWebAPI { public: PIWebAPI(); // Destructor ~PIWebAPI(); void setAuthMethod (std::string& authMethod) {m_authMethod = authMethod; } void setAuthBasicCredentials(std::string& authBasicCredentials) {m_authBasicCredentials = authBasicCredentials; } int GetVersion(const string& host, string &version, bool logMessage = true); string ExtractVersion(const string& response); string errorMessageHandler(const string& msg); private: string extractSection(const string& msg, const string& toSearch); string extractMessageFromJSon(const string& json); string m_authMethod; // Authentication method to be used string m_authBasicCredentials; // Credentials is the base64 encoding of id and password joined by a single colon (:) // Substitute a message with a different one const vector<pair<string, string>> PIWEB_ERRORS = { // original message New one {"Noroutetohost", "The PI Web API server is not reachable, verify the network reachability"}, {"No route to host", "The PI Web API server is not reachable, verify the network reachability"}, }; }; #endif ================================================ FILE: C/plugins/common/include/simple_http.h ================================================ #ifndef _SIMPLE_HTTP_H #define _SIMPLE_HTTP_H /* * Fledge HTTP Sender wrapper. 
* * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto, Mark Riddoch */ #include <string> #include <vector> #include <http_sender.h> #include <client_http.hpp> #include <fstream> using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>; class SimpleHttp: public HttpSender { public: /** * Constructor: pass host:port & connect & request timeouts */ SimpleHttp(const std::string& host_port, unsigned int connect_timeout = 0, unsigned int request_timeout = 0, unsigned int retry_sleep_Time = 1, unsigned int max_retry = 4); // Destructor ~SimpleHttp(); /** * Set the host and port of the proxy server */ void setProxy(const std::string& proxy); /** * HTTP(S) request: pass method and path, HTTP headers and POST/PUT payload. */ int sendRequest( const std::string& method = std::string(HTTP_SENDER_DEFAULT_METHOD), const std::string& path = std::string(HTTP_SENDER_DEFAULT_PATH), const std::vector<std::pair<std::string, std::string>>& headers = {}, const std::string& payload = std::string() ); void setAuthMethod (std::string& authMethod) {m_authMethod = authMethod; } void setAuthBasicCredentials(std::string& authBasicCredentials) {m_authBasicCredentials = authBasicCredentials; } std::string getHostPort() { return m_host_port; }; std::string getHTTPResponse() { return m_HTTPResponse; }; unsigned int getMaxRetries() { return m_max_retry; }; // OCS configurations void setOCSNamespace (std::string& OCSNamespace) {m_OCSNamespace = OCSNamespace; } void setOCSTenantId (std::string& OCSTenantId) {m_OCSTenantId = OCSTenantId; } void setOCSClientId (std::string& OCSClientId) {m_OCSClientId = OCSClientId; } void setOCSClientSecret (std::string& OCSClientSecret) {m_OCSClientSecret = OCSClientSecret; } void setOCSToken (std::string& OCSToken) {m_OCSToken = OCSToken; } void setMaxRetries (unsigned int retries) {m_max_retry = retries; }; private: // Make private the copy constructor and operator= SimpleHttp(const SimpleHttp&); SimpleHttp& 
operator=(SimpleHttp const &); void setTrace(); void resetTrace(); private: std::string m_host_port; HttpClient *m_sender; std::string m_HTTPResponse; unsigned int m_retry_sleep_time; // Seconds between each retry unsigned int m_max_retry; // Max number of retries in the communication std::string m_authMethod; // Authentication method to be used std::string m_authBasicCredentials; // Credentials is the base64 encoding of id and password joined by a single colon (:) // OCS configurations std::string m_OCSNamespace; std::string m_OCSTenantId; std::string m_OCSClientId; std::string m_OCSClientSecret; std::string m_OCSToken; bool m_log; std::ofstream m_ofs; }; #endif ================================================ FILE: C/plugins/common/include/simple_https.h ================================================ #ifndef _SIMPLE_HTTPS_H #define _SIMPLE_HTTPS_H /* * Fledge HTTP Sender wrapper. * * Copyright (c) 2018 Diamnomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto, Mark Riddoch, Stefano Simonelli */ #include <string> #include <vector> #include <http_sender.h> #include <client_https.hpp> #include <fstream> using HttpsClient = SimpleWeb::Client<SimpleWeb::HTTPS>; class SimpleHttps: public HttpSender { public: /** * Constructor: * pass host:port, connect & request timeouts */ SimpleHttps(const std::string& host_port, unsigned int connect_timeout = 0, unsigned int request_timeout = 0, unsigned int retry_sleep_Time = 1, unsigned int max_retry = 4); // Destructor ~SimpleHttps(); /** * Set the host and port of the proxy server */ void setProxy(const std::string& proxy); /** * HTTP(S) request: pass method and path, HTTP headers and POST/PUT payload. 
*/ int sendRequest( const std::string& method = std::string(HTTP_SENDER_DEFAULT_METHOD), const std::string& path = std::string(HTTP_SENDER_DEFAULT_PATH), const std::vector<std::pair<std::string, std::string>>& headers = {}, const std::string& payload = std::string() ); void setAuthMethod (std::string& authMethod) {m_authMethod = authMethod; } void setAuthBasicCredentials(std::string& authBasicCredentials) {m_authBasicCredentials = authBasicCredentials; } void setMaxRetries (unsigned int retries) {m_max_retry = retries; }; // OCS configurations void setOCSNamespace (std::string& OCSNamespace) {m_OCSNamespace = OCSNamespace; } void setOCSTenantId (std::string& OCSTenantId) {m_OCSTenantId = OCSTenantId; } void setOCSClientId (std::string& OCSClientId) {m_OCSClientId = OCSClientId; } void setOCSClientSecret (std::string& OCSClientSecret) {m_OCSClientSecret = OCSClientSecret; } void setOCSToken (std::string& OCSToken) {m_OCSToken = OCSToken; } std::string getHTTPResponse() { return m_HTTPResponse; }; std::string getHostPort() { return m_host_port; }; unsigned int getMaxRetries() { return m_max_retry; }; private: // Make private the copy constructor and operator= SimpleHttps(const SimpleHttps&); SimpleHttps& operator=(SimpleHttps const &); void setTrace(); void resetTrace(); private: std::string m_host_port; HttpsClient *m_sender; std::string m_HTTPResponse; unsigned int m_retry_sleep_time; // Seconds between each retry unsigned int m_max_retry; // Max number of retries in the communication std::string m_authMethod; // Authentication method to be used std::string m_authBasicCredentials; // Credentials is the base64 encoding of id and password joined by a single colon (:) // OCS configurations std::string m_OCSNamespace; std::string m_OCSTenantId; std::string m_OCSClientId; std::string m_OCSClientSecret; std::string m_OCSToken; bool m_log; std::ofstream m_ofs; }; #endif ================================================ FILE: C/plugins/common/libcurl_https.cpp 
================================================
/*
 * Fledge HTTPS Sender implementation using the libcurl library
 * - https://curl.haxx.se/libcurl/
 * - https://github.com/curl/curl
 *
 * Fledge uses the libcurl library to support the Kerberos authentication
 *
 * Copyright (c) 2019 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Stefano Simonelli
 */
#include <thread>
#include <logger.h>
#include <vector>
#include <sstream>
#include <string.h>
#include <stdlib.h>
#include <curl/curl.h>
#include <unistd.h>
#include "libcurl_https.h"
#include "string_utils.h"

#define VERBOSE_LOG 0
#define HTTP_HEADER_LINE 255

using namespace std;

/**
 * Creates a UTC time string for the current time
 *
 * @return Current UTC time
 */
static std::string CurrentTimeString()
{
	time_t now = time(NULL);
	struct tm timeinfo;
	gmtime_r(&now, &timeinfo);
	char timeString[20];	// 19 chars for "YYYY-MM-DD HH:MM:SS" plus the NUL
	strftime(timeString, sizeof(timeString), "%F %T", &timeinfo);
	return std::string(timeString);
}

/**
 * Constructor: host:port, connect_timeout, request_timeout,
 * retry_sleep_Time, max_retry
 *
 * Logs the messages into omf.log if the file is present
 */
LibcurlHttps::LibcurlHttps(const string& host_port,
			   unsigned int connect_timeout,
			   unsigned int request_timeout,
			   unsigned int retry_sleep_Time,
			   unsigned int max_retry) :
			   HttpSender(),
			   m_connect_timeout(connect_timeout),
			   m_request_timeout(request_timeout),
			   m_host_port(host_port),
			   m_retry_sleep_time(retry_sleep_Time),
			   m_max_retry (max_retry)
{
	// Global libcurl initialisation, required once per process
	// before any other libcurl call
	if (curl_global_init(CURL_GLOBAL_DEFAULT) != 0)
	{
		Logger::getLogger()->error("libcurl_https - curl_global_init failed, the libcurl library cannot be initialized.");
	}

	setTrace();
}

/**
 * Destructor
 */
LibcurlHttps::~LibcurlHttps()
{
	resetTrace();
	curl_global_cleanup();
}

/**
 * @brief Configures logging for the HTTP sender.
 */
void LibcurlHttps::setTrace()
{
	// Check if the log file is writable; tracing is enabled only when it is
	std::string tracePath = HttpSender::getOMFTracePath();
	if (access(tracePath.c_str(), W_OK) == 0)
	{
		m_log = true;
		m_ofs.open(tracePath.c_str(), ofstream::app);
	}
	else
	{
		m_log = false;
	}
}

/**
 * @brief Resets the logging state for the HTTP sender.
 */
void LibcurlHttps::resetTrace()
{
	if (m_log)
	{
		m_ofs.close(); // Close the log file if it is open
	}
}

/**
 * Add a proxy server
 *
 * NOTE(review): m_sender is only created inside sendRequest() by
 * curl_easy_init(), so calling setProxy() before sendRequest() passes an
 * uninitialised handle to curl_easy_setopt() - confirm the intended call order.
 *
 * @param proxy The host and port of the proxy
 */
void LibcurlHttps::setProxy(const string& proxy)
{
	curl_easy_setopt(m_sender, CURLOPT_PROXY, proxy.c_str());
}

/**
 * Avoid libcurl debug messages
 */
size_t cb_write_data(void *buffer, size_t size, size_t nmemb, void *userp)
{
	// Discard the body: returning the full byte count tells libcurl
	// the data was consumed
	return size * nmemb;
}

/**
 * Handle the call back header to retrieve the text message in response to an HTTP request
 * this call back is called for all the headers lines
 *
 * received header is nitems * size long in 'buffer' NOT ZERO TERMINATED
 * userdata' is set with CURLOPT_HEADERDATA
 *
 * @param buffer    Header message
 * @param size      (nitems * size) is the size of 'buffer'
 * @param nitems
 * @param userdata  out Buffer to store the data needed
 *
 */
static size_t cb_header(char *buffer, size_t size, size_t nitems, void *userdata)
{
	char *header = (char *) userdata;
	int numBytes = 0;
	bool getHeader = false;

	// Only the first line of the headers is needed
	if (*header == '\0')
	{
		getHeader = true;
	}
	else
	{
		// in some situations as using Kerberos
		// the last header starting with HTTP contains the real error
		char tmpBuffer[10];

		// Copy the first 4 characters of the line and upper-case them
		// to detect a status line ("HTTP/...")
		sprintf(tmpBuffer, "%.*s", 4, buffer);
		string tmpStr = tmpBuffer;
		for (auto & c: tmpStr) c = toupper(c);

		if (tmpStr.compare("HTTP") == 0)
		{
			getHeader = true;
		}
	}

	if (getHeader)
	{
		// Truncate to the capacity of the destination buffer
		if ((size * nitems) < (HTTP_HEADER_LINE - 1))
			numBytes = size * nitems;
		else
			numBytes = HTTP_HEADER_LINE - 1;

		sprintf(header, "%.*s", numBytes, buffer);
	}

	return nitems * size;
}

/**
 * Setups the libcurl general options used in all the HTTP methods
 *
 * @param sender
libcurl handle on which the options should be configured
 * @param path      The URL path
 * @param headers   The optional headers to send
 *
 */
void LibcurlHttps::setLibCurlOptions(CURL *sender, const string& path, const vector<pair<string, string>>& headers)
{
	string httpHeader;

	// NOTE(review): the options below are applied to the member m_sender
	// rather than to the 'sender' parameter - they are the same handle when
	// called from sendRequest(), but the parameter is otherwise unused.
#if VERBOSE_LOG
	curl_easy_setopt(m_sender, CURLOPT_VERBOSE, 1L);
#else
	curl_easy_setopt(m_sender, CURLOPT_VERBOSE, 0L);

	// this workaround is needed to avoid all libcurl debug messages
	curl_easy_setopt(m_sender, CURLOPT_WRITEFUNCTION, cb_write_data);
#endif
	curl_easy_setopt(m_sender, CURLOPT_NOPROGRESS, 1L);
	curl_easy_setopt(m_sender, CURLOPT_TCP_KEEPALIVE, 1L);
	curl_easy_setopt(m_sender, CURLOPT_TIMEOUT, m_request_timeout);
	curl_easy_setopt(m_sender, CURLOPT_CONNECTTIMEOUT, m_connect_timeout);

	// HTTP headers handling
	m_chunk = curl_slist_append(m_chunk, "User-Agent: " HTTP_SENDER_USER_AGENT);

	// To let PI Web API having Cross-Site Request Forgery (CSRF) enabled as by default configuration
	m_chunk = curl_slist_append(m_chunk, "X-Requested-With: XMLHttpRequest");

	// Caller supplied headers
	for (auto it = headers.begin(); it != headers.end(); ++it)
	{
		httpHeader = (*it).first + ": " + (*it).second;
		m_chunk = curl_slist_append(m_chunk, httpHeader.c_str());
	}

	// Handle basic authentication
	if (m_authMethod == "b")
	{
		httpHeader = "Authorization: Basic " + m_authBasicCredentials;
		m_chunk = curl_slist_append(m_chunk, httpHeader.c_str());

		/* set user name and password for the authentication */
		//curl_easy_setopt(m_sender, CURLOPT_USERPWD, "user:pwd");
	}
	curl_easy_setopt(m_sender, CURLOPT_HTTPHEADER, m_chunk);

	// Handle Kerberos authentication
	if (m_authMethod == "k")
	{
		Logger::getLogger()->debug("Kerberos authentication - keytab file :%s: ", getenv("KRB5_CLIENT_KTNAME"));

		curl_easy_setopt(m_sender, CURLOPT_HTTPAUTH, CURLAUTH_GSSNEGOTIATE);

		// The empty user should be defined for Kerberos authentication
		curl_easy_setopt(m_sender, CURLOPT_USERPWD, ":");
	}

	// Configure libcurl
	string url = "https://" + m_host_port + path;
	curl_easy_setopt(m_sender, CURLOPT_URL, url.c_str());

	// Setup SSL
	// NOTE(review): peer and host verification are disabled, so the server
	// certificate is not validated.
	curl_easy_setopt(m_sender, CURLOPT_USE_SSL, CURLUSESSL_ALL);
	curl_easy_setopt(m_sender, CURLOPT_SSL_VERIFYPEER, 0L);
	curl_easy_setopt(m_sender, CURLOPT_SSL_VERIFYHOST, 0L);

	curl_easy_setopt(m_sender, CURLOPT_HTTP_VERSION, (long)CURL_HTTP_VERSION_2TLS);
}

/**
 * Send data, it retries the operation m_max_retry times
 * waiting m_retry_sleep_time*2 at each attempt
 *
 * @param method    The HTTP method (GET, POST, ...)
 * @param path      The URL path
 * @param headers   The optional headers to send
 * @param payload   The optional data payload (for POST, PUT)
 * @return          The HTTP code for the cases : 1xx Informational /
 *                  2xx Success /
 *                  3xx Redirection
 * @throw           BadRequest for HTTP 400 error
 *                  std::exception as generic exception for all the
 *                  cases >= 401 Client errors / 5xx Server errors
 */
int LibcurlHttps::sendRequest(
		const string& method,
		const string& path,
		const vector<pair<string, string>>& headers,
		const string& payload
)
{
	// Variables definition
	long httpCode = 0;
	string httpResponseText;
	char httpHeaderBuffer[HTTP_HEADER_LINE];
	bool retry = false;
	int retryCount = 1;
	int sleepTime = m_retry_sleep_time;
	CURLcode res = CURLE_OK;

	enum exceptionType
	{
		none,
		typeBadRequest,
		typeException
	};
	exceptionType exceptionRaised = none;

	string exceptionMessage;
	string errorMessage;

	// Init libcurl
	m_sender = curl_easy_init();
	if(m_sender)
	{
		setLibCurlOptions(m_sender, path, headers);
	}
	else
	{
		// NOTE(review): this local shadows the outer errorMessage
		string errorMessage = "libcurl_https - curl_easy_init failed, the libcurl library cannot be initialized.";
		Logger::getLogger()->error(errorMessage);
		throw runtime_error(errorMessage.c_str());
	}

	// Select the requested HTTP method
	if (method.compare("POST") == 0)
	{
		curl_easy_setopt(m_sender, CURLOPT_POST, 1L);
		curl_easy_setopt(m_sender, CURLOPT_POSTFIELDS, payload.c_str());
		curl_easy_setopt(m_sender, CURLOPT_POSTFIELDSIZE, (long) payload.length());
	}
	else if (method.compare("GET") == 0)
	{
		// TODO : to be implemented
		errorMessage =
			"libcurl_https - method GET is not currently implemented";
		Logger::getLogger()->debug(errorMessage);
		throw runtime_error(errorMessage);
	}
	else if (method.compare("PUT") == 0)
	{
		// TODO : to be implemented
		errorMessage = "libcurl_https - method PUT currently not implemented";
		Logger::getLogger()->debug("libcurl_https - method PUT is not currently implemented");
		throw runtime_error(errorMessage);
	}
	else if (method.compare("DELETE") == 0)
	{
		// TODO : to be implemented
		errorMessage = "libcurl_https - method DELETE currently not implemented";
		Logger::getLogger()->debug("libcurl_https - method DELETE is not currently implemented ");
		throw runtime_error(errorMessage);
	}

	// Retry loop: sleepTime doubles at each failed attempt
	do
	{
		std::chrono::high_resolution_clock::time_point tStart;
		try
		{
			exceptionRaised = none;
			httpCode = 0;
			httpResponseText = "";
			httpHeaderBuffer[0] = '\0';

			// It is needed to handle the call back header to retrieve the text message
			// in response to an HTTP request
			// curl.haxx.se/mail/lib-2013-10/0114.html
			curl_easy_setopt(m_sender, CURLOPT_HEADERDATA, httpHeaderBuffer);
			curl_easy_setopt(m_sender, CURLOPT_HEADERFUNCTION, cb_header);

			if (m_log)
			{
				// Trace the outgoing request
				m_ofs << endl << method << " " << path << endl;
				m_ofs << "Headers" << endl;
				for (auto it = headers.begin(); it != headers.end(); it++)
				{
					m_ofs << " " << it->first << ": " << it->second << endl;
				}
				m_ofs << "Payload:" << endl;
				m_ofs << payload << endl;
				tStart = std::chrono::high_resolution_clock::now();
			}

			// Execute the HTTP method
			res = curl_easy_perform(m_sender);
			curl_easy_getinfo(m_sender, CURLINFO_RESPONSE_CODE, &httpCode);

			// fix the text message
			// NOTE : the text should be considered only if the HTTP code is not an ACK
			httpResponseText = httpHeaderBuffer;

			if (m_log)
			{
				// Trace the response and the elapsed time
				std::chrono::high_resolution_clock::time_point tEnd = std::chrono::high_resolution_clock::now();
				m_ofs << "Response:" << endl;
				m_ofs << " Code: " << httpCode << endl;
				m_ofs << " Time: " << ((double)std::chrono::duration_cast<std::chrono::microseconds>(tEnd - tStart).count()) / 1.0E6 << " sec " << CurrentTimeString() << endl;
				m_ofs << " Content: " << httpResponseText << endl << endl;
			}

			StringStripCRLF(httpResponseText);

			m_HTTPResponse = httpResponseText;
		}
		catch (exception &ex)
		{
			exceptionRaised = typeException;
			errorMessage = "Failed to send data: ";
			errorMessage.append(ex.what());
		}

		// Success: curl OK, no exception and an informational/success/redirection code
		if ( (res == CURLE_OK ) &&
		     (exceptionRaised == none ) &&
		     ((httpCode >= 200) && (httpCode <= 399)) )
		{
			retry = false;
#if VERBOSE_LOG
			Logger::getLogger()->info("HTTPS sendRequest succeeded : retry count |%d| HTTP code |%d|",
						  retryCount,
						  httpCode);
#endif
		}
		else
		{
			if (res != CURLE_OK)
			{
				// Transport level failure: use the libcurl error text
				errorMessage = string(curl_easy_strerror(res) );
				if (httpResponseText.compare("") != 0 )
					errorMessage += " - " + httpResponseText;
			}
			else
			{
				// the situation is : CURLE_OK but the httpCode reports an error
				errorMessage = httpResponseText;
			}
#if VERBOSE_LOG
			if (exceptionRaised)
			{
				Logger::getLogger()->error(
					"HTTPS sendRequest : retry count |%d| error |%s| message |%s|",
					retryCount,
					errorMessage.c_str(),
					payload.c_str());
			}
			else
			{
				Logger::getLogger()->error(
					"HTTPS sendRequest : retry count |%d| HTTP code |%d| error message |%s| HTTP message |%s|",
					retryCount,
					httpCode,
					errorMessage.c_str(),
					payload.c_str());
			}
#endif
			if (m_log && !errorMessage.empty())
			{
				std::chrono::high_resolution_clock::time_point tEnd = std::chrono::high_resolution_clock::now();
				m_ofs << " Time: " << ((double)std::chrono::duration_cast<std::chrono::microseconds>(tEnd - tStart).count()) / 1.0E6 << " sec " << CurrentTimeString() << endl;
				m_ofs << " Exception: " << errorMessage << endl;
			}

			if (retryCount < m_max_retry)
			{
				// Wait and try again, doubling the sleep at each attempt
				this_thread::sleep_for(chrono::seconds(sleepTime));
				retry = true;
				sleepTime *= 2;
				retryCount++;
			}
			else
			{
				retry = false;
			}
		}
	} while (retry);

	// Cleanup
	curl_easy_cleanup(m_sender);
	curl_slist_free_all(m_chunk);
	m_sender = NULL;
	m_chunk = NULL;

	// Check if an error should be raised
	if (exceptionRaised == none)
	{
		// 0 = HTTP failed without an HTTP code
		if (httpCode == 0)
		{
			throw runtime_error(errorMessage);
		}
		else if (httpCode == 400)
		{
			throw BadRequest(errorMessage);
		}
		else if (httpCode == 401)
		{
			throw Unauthorized(errorMessage);
		}
		else if (httpCode == 409)
		{
			throw Conflict(errorMessage);
		}
		else if (httpCode >= 401)
		{
			string errorMessageHTTP;
			errorMessageHTTP = "HTTP code |" + to_string(httpCode) + "| - HTTP error |" + errorMessage + "|";
			throw runtime_error(errorMessageHTTP);
		}
	}
	else
	{
		throw runtime_error(errorMessage);
	}

	return httpCode;
}


================================================
FILE: C/plugins/common/piwebapi.cpp
================================================
/*
 * Fledge OSIsoft PI Web API integration.
 * Implements the integration for the specific functionalities exposed by PI Web API
 *
 * Copyright (c) 2020-2022 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Stefano Simonelli
 */
#include <string>
#include <vector>
#include <utility>
#include <piwebapi.h>
#include <string_utils.h>
#include <logger.h>
#include <simple_https.h>
#include <string_utils.h>
#include <rapidjson/document.h>
#include "rapidjson/error/en.h"
#include <stdlib.h>
#include <string.h>
#include <status_code.hpp>

using namespace std;
using namespace rapidjson;

PIWebAPI::PIWebAPI()
{
}

// Destructor
PIWebAPI::~PIWebAPI()
{
}

/**
 * Extracts the PIWebAPI version from the JSON returned by the PIWebAPI api
 *
 * @param response  JSON message generated by the PIWebAPI API containing the version
 * @return          The version of the PIWebAPI server
 *
 */
std::string PIWebAPI::ExtractVersion(const string& response)
{
	Document JSon;
	string version;
	string responseFixed;
	ParseResult ok;

	// TODO: at the current stage a non JSON is returned, so we fixed the format
	ok = JSon.Parse(response.c_str());
	if (!ok)
	{
		// Prepend the missing opening characters so the text parses as JSON
		responseFixed = "{\"" + response;
		StringStripCRLF(responseFixed);
	}
	else
	{
		responseFixed = response;
	}

	ok = JSon.Parse(responseFixed.c_str());
	if (!ok)
	{
		Logger::getLogger()->error("PIWebAPI version extract, invalid json - HTTP response :%s:", response.c_str());
	}
	else
	{
		if
(JSon.HasMember("ProductTitle"))
		{
			version = JSon["ProductTitle"].GetString();
		}
		if (JSon.HasMember("ProductVersion"))
		{
			// Append the product version to the title
			version = version + "-" + JSon["ProductVersion"].GetString();
		}
	}

	return(version);
}

/**
 * Calls the PI Web API to retrieve the version
 *
 * @param host        Reference of the server running PI Web API in the format: hostName + ":" + port
 * @param version     The returned version string of the PI Web API server
 * @param logMessage  If true, log an error message if there is a failure (default: true)
 * @return            HTTP response status code
 *
 */
int PIWebAPI::GetVersion(const string& host, string &version, bool logMessage)
{
	string response;
	string payload;
	string errorMsg;
	HttpSender *endPoint;
	vector<pair<string, string>> header;
	int httpCode;

	endPoint = new SimpleHttps(host,
				   TIMEOUT_CONNECT,
				   TIMEOUT_REQUEST,
				   RETRY_SLEEP_TIME,
				   MAX_RETRY);

	// HTTP header
	header.push_back( std::make_pair("Content-Type", "application/json"));
	header.push_back( std::make_pair("Accept", "application/json"));

	// HTTP payload
	payload = "";

	// Set requested authentication
	endPoint->setAuthMethod (m_authMethod);
	endPoint->setAuthBasicCredentials(m_authBasicCredentials);

	try
	{
		httpCode = endPoint->sendRequest("GET",
						 URL_GET_VERSION,
						 header,
						 payload);

		response = endPoint->getHTTPResponse();
		if (httpCode >= 200 && httpCode <= 399)
		{
			version = ExtractVersion(response);
		}
		else if (logMessage)
		{
			errorMsg = errorMessageHandler(response);
			Logger::getLogger()->error("Error in retrieving the PI Web API version from %s: [%d] %s ",
						   host.c_str(),
						   httpCode,
						   errorMsg.c_str());
		}
	}
	catch (const BadRequest& ex)
	{
		if (logMessage)
		{
			errorMsg = errorMessageHandler(ex.what());
			Logger::getLogger()->error("BadRequest error retrieving the PI Web API version from %s: %s",
						   host.c_str(),
						   errorMsg.c_str());
		}
		httpCode = (int) SimpleWeb::StatusCode::client_error_bad_request;
	}
	catch (const Unauthorized& ex)
	{
		if (logMessage)
		{
			Logger::getLogger()->error("The PI Web API server at %s has rejected our request due to an authentication issue. Please check the authentication method and credentials are correctly configured.",
						   host.c_str());
		}
		httpCode = (int) SimpleWeb::StatusCode::client_error_unauthorized;
	}
	catch (exception &ex)
	{
		if (logMessage)
		{
			errorMsg = errorMessageHandler(ex.what());
			Logger::getLogger()->error("Error in retrieving the PI Web API version from %s: %s",
						   host.c_str(),
						   errorMsg.c_str());
		}
		// Generic failure: report service unavailable
		httpCode = (int) SimpleWeb::StatusCode::server_error_service_unavailable;
	}

	delete endPoint;
	return httpCode;
}

/**
 * Extracts a section from a string, between the string and the '|' character
 *
 * @param msg       Msg from which the string must be extracted
 * @param toSearch  The string to be searched as the starting position for the extraction
 * @return          The extracted string
 *
 */
string PIWebAPI::extractSection(const string& msg, const string& toSearch)
{
	string::size_type pos, pos1, pos2;
	string section;

	pos = msg.find (toSearch);
	if (pos != string::npos )
	{
		// The text is delimited by the first and second '|' after the match
		pos1 = msg.find ("|",pos);
		pos2 = msg.find ("|",pos1+1);

		if (pos2 != string::npos )
		{
			section = msg.substr(pos1 +1, pos2 - pos1 -1);
		}
	}

	return (section);
}

/**
 * Handles PIWebAPI json error message extracting significant parts to produce a meaningful and concise message
 *
 * OSIsoft documentation about the error structure generated by PIWebAPI:
 * https://docs.osisoft.com/bundle/pi-web-api-reference/page/help/topics/error-handling.html
 *
 * @param json  JSON message generated by PIWebAPI containing the error
 * @return      The concise and meaningful error message
 *
 */
string PIWebAPI::extractMessageFromJSon(const string& json)
{
	Document JSon;
	ParseResult ok;
	string::size_type pos;
	string msgFinal, msgFixed;
	string msgMessage, msgReason,msgName, msgValue;

	msgFixed = extractSection(json, "HTTP error |");
	if (msgFixed.empty())
		msgFixed = json;

	ok = JSon.Parse(msgFixed.c_str());
	if (!ok)
	{
		// removes bad characters if present in the error message
		// (the bytes EF BB BF - a UTF-8 byte order mark)
		char badChars[4];
		badChars[0]='\357';
		badChars[1]='\273';
		badChars[2]='\277';
		badChars[3]=0;
		pos = msgFixed.find (badChars);
		if (pos != string::npos )
		{
			msgFixed.erase ( pos, strlen(badChars) );
		}
	}

	// Parse again after the cleanup
	ok = JSon.Parse(msgFixed.c_str());
	if (ok)
	{
		if (JSon.HasMember("Messages"))
		{
			Value &messages = JSon["Messages"];

			if (messages.IsArray())
			{
				long messageId;

				// Walk the Messages array: only the first message
				// carrying a Parameters array is used (note the break)
				for (Value::ConstValueIterator itr = messages.Begin(); itr != messages.End(); ++itr)
				{
					if ((*itr)["MessageIndex"].IsInt64())
						messageId = (*itr)["MessageIndex"].GetInt64();

					if ((*itr).HasMember("Events"))
					{
						const Value &messageEvents = (*itr)["Events"];
						if (messageEvents.IsArray())
						{
							// Only the first event of the message is considered
							Value::ConstValueIterator itrEvents = messageEvents.Begin();

							const Value &messageInfo = (*itrEvents)["EventInfo"];
							msgMessage = messageInfo["Message"].GetString();

							if (messageInfo.HasMember("Reason") && messageInfo["Reason"].IsString())
								msgReason = messageInfo["Reason"].GetString();

							const Value &parameters = messageInfo["Parameters"];
							if (parameters.IsArray())
							{
								for (Value::ConstValueIterator itrParameters = parameters.Begin(); itrParameters != parameters.End(); ++itrParameters)
								{
									if (! msgValue.empty())
										msgValue += " ";

									msgName = (*itrParameters)["Name"].GetString();
									msgValue += (*itrParameters)["Value"].GetString();
								}

								// Compose the final text from message, reason and values
								msgFinal = msgMessage;
								if (!msgReason.empty())
									msgFinal += " " + msgReason;
								if (!msgValue.empty())
									msgFinal += " " + msgValue;
								break;
							}
							else
							{
								Logger::getLogger()->warn("PI Web API errors handling expects to received Parameters as an JSON array");
							}
						}
					}
					else
					{
						Logger::getLogger()->warn("PI Web API errors handling expects to received Events as an JSON array");
					}
				}
			}
			else
			{
				Logger::getLogger()->warn("PI Web API errors handling expects to received Messages as an JSON array");
			}
		}
	}

	return (msgFinal);
}

/**
 * Handles PI Web API error message considering the possible cases:
 *
 * - removes all the LF CR and extracts spaces
 * - substitutes a message with a different one using an hardcoded vector
 * - in the case of the presence of an HTTP code adds the corresponding text using the Simple-Web-Server functionalities
 * - Handles PI Web API json error message extracting significant parts to produce a meaningful and concise message
 *
 * @param msg  Error message generated by PIWebAPI
 * @return     The concise and meaningful error message
 *
 */
string PIWebAPI::errorMessageHandler(const string& msg)
{
	Document JSon;
	ParseResult ok;
	string msgTrimmed, msgSub, msgHttp, msgJson, finalMsg, msgFixed, messages, tmpMsg;
	string httpCode;
	int httpCodeN;

	msgTrimmed = StringStripWhiteSpacesExtra(msg);

	// Handles error message substitution
	for(auto &errorMsg : PIWEB_ERRORS)
	{
		if (msgTrimmed.find(errorMsg.first) != std::string::npos)
		{
			msgSub = errorMsg.second;
		}
	}

	// Handles HTTP error code recognition
	httpCode = extractSection(msgTrimmed, "HTTP code |");
	if (! httpCode.empty())
	{
		SimpleWeb::StatusCode code;

		httpCodeN = atoi(httpCode.c_str());
		code = (SimpleWeb::StatusCode) httpCodeN;
		msgHttp = SimpleWeb::status_code(code);
	}

	// Handles error in JSON format returned by the PI Web API
	msgJson = extractMessageFromJSon (msgTrimmed);

	// Define the final message
	// Priority: JSON-extracted text, then HTTP status text, then substitution
	finalMsg = msg;
	if (!msgSub.empty())
		finalMsg = msgSub;
	if (!msgHttp.empty())
		finalMsg = msgHttp;
	if (!msgJson.empty())
		finalMsg = msgJson;

	return(finalMsg);
}


================================================
FILE: C/plugins/common/simple_http.cpp
================================================
/*
 * Fledge HTTP Sender implementation using the
 * Simple Web Server HTTP library.
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto, Mark Riddoch
 */
#include <simple_http.h>
#include <thread>
#include <logger.h>
#include <unistd.h>

#define VERBOSE_LOG 0

using namespace std;

/**
 * Creates a UTC time string for the current time
 *
 * @return Current UTC time
 */
static std::string CurrentTimeString()
{
	time_t now = time(NULL);
	struct tm timeinfo;
	gmtime_r(&now, &timeinfo);
	char timeString[20];
	strftime(timeString, sizeof(timeString), "%F %T", &timeinfo);
	return std::string(timeString);
}

// Using https://github.com/eidheim/Simple-Web-Server
using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>;

/**
 * Constructor: host:port, connect_timeout, request_timeout
 */
SimpleHttp::SimpleHttp(const string& host_port,
		       unsigned int connect_timeout,
		       unsigned int request_timeout,
		       unsigned int retry_sleep_Time,
		       unsigned int max_retry) :
		       HttpSender(),
		       m_host_port(host_port),
		       m_retry_sleep_time(retry_sleep_Time),
		       m_max_retry (max_retry)
{
	m_sender = new HttpClient(host_port);

	m_sender->config.timeout = (time_t)request_timeout;
	m_sender->config.timeout_connect = (time_t)connect_timeout;

	setTrace();
}

/**
 * @brief Configures logging for the HTTP sender.
 */
void SimpleHttp::setTrace()
{
	// Check if the log file is writable; tracing is enabled only when it is
	std::string tracePath = HttpSender::getOMFTracePath();
	if (access(tracePath.c_str(), W_OK) == 0)
	{
		m_log = true;
		m_ofs.open(tracePath.c_str(), ofstream::app);
	}
	else
	{
		m_log = false;
	}
}

/**
 * @brief Resets the logging state for the HTTP sender.
 */
void SimpleHttp::resetTrace()
{
	if (m_log)
	{
		m_ofs.close(); // Close the log file if it is open
	}
}

/**
 * Set a proxy server
 *
 * @param proxy The name and port of the proxy server
 */
void SimpleHttp::setProxy(const string& proxy)
{
	m_sender->config.proxy_server = proxy;
}

/**
 * Destructor
 */
SimpleHttp::~SimpleHttp()
{
	resetTrace();
	delete m_sender;
}

/**
 * Send data
 *
 * @param method    The HTTP method (GET, POST, ...)
 * @param path      The URL path
 * @param headers   The optional headers to send
 * @param payload   The optional data payload (for POST, PUT)
 * @return          The HTTP code on success or 0 on exceptions
 */
int SimpleHttp::sendRequest(
		const string& method,
		const string& path,
		const vector<pair<string, string>>& headers,
		const string& payload
)
{
	SimpleWeb::CaseInsensitiveMultimap header;

	// Add Fledge UserAgent
	header.emplace("User-Agent", HTTP_SENDER_USER_AGENT);
	header.emplace("Content-Type", "application/json");

	// To let PI Web API having Cross-Site Request Forgery (CSRF) enabled as by default configuration
	header.emplace("X-Requested-With", "XMLHttpRequest");

	// Add custom headers
	for (auto it = headers.begin(); it != headers.end(); ++it)
	{
		header.emplace((*it).first, (*it).second);
	}

	// Handle basic authentication
	if (m_authMethod == "b")
		header.emplace("Authorization", "Basic " + m_authBasicCredentials);

	string retCode;
	string response;
	int http_code;
	bool retry = false;
	int retry_count = 1;
	int sleep_time = m_retry_sleep_time;

	enum exceptionType
	{
		none,
		typeBadRequest,
		typeException
	};
	exceptionType exception_raised;
	string exception_message;

	// Retry loop: sleep_time doubles at each failed attempt
	do
	{
		std::chrono::high_resolution_clock::time_point tStart;
		try
		{
			exception_raised = none;
			http_code = 0;

			if (m_log)
			{
				// Trace the outgoing request
				m_ofs << endl << method << " " << path << endl;
				m_ofs << "Headers:" << endl;
				for (auto it = header.begin(); it != header.end(); it++)
				{
					m_ofs << " " << it->first << ": " << it->second << endl;
				}
				m_ofs << "Payload:" << endl;
				m_ofs << payload << endl;
				tStart = std::chrono::high_resolution_clock::now();
			}

			// Call HTTPS method
			auto res = m_sender->request(method, path, payload, header);
			retCode = res->status_code;
			response = res->content.string();

			if (m_log)
			{
				// Trace the response and the elapsed time
				std::chrono::high_resolution_clock::time_point tEnd = std::chrono::high_resolution_clock::now();
				m_ofs << "Response:" << endl;
				m_ofs << " Code: " << res->status_code << endl;
				m_ofs << " Time: " << ((double)std::chrono::duration_cast<std::chrono::microseconds>(tEnd - tStart).count()) / 1.0E6 << " sec " << CurrentTimeString() << endl;
				m_ofs << " Content: " << res->content.string() << endl << endl;
			}

			m_HTTPResponse = response;

			// In same cases the response is an empty string
			// and retCode contains code and the description
			if (response.compare("") == 0)
				response = res->status_code;

			http_code = atoi(retCode.c_str());
		}
		catch (BadRequest &ex)
		{
			exception_raised = typeBadRequest;
			exception_message = ex.what();
		}
		catch (exception &ex)
		{
			exception_raised = typeException;
			exception_message = "Failed to send data: ";
			exception_message.append(ex.what());
		}

		if (exception_raised == none &&
		    ((http_code >= 200) && (http_code <= 399)))
		{
			retry = false;
#if VERBOSE_LOG
			Logger::getLogger()->info("HTTP sendRequest succeeded : retry count |%d| HTTP code |%d| message |%s|",
						  retry_count,
						  http_code,
						  payload.c_str());
#endif
		}
		else
		{
#if VERBOSE_LOG
			if (exception_raised)
			{
				Logger::getLogger()->error(
					"HTTP sendRequest : retry count |%d| error |%s| message |%s|",
					retry_count,
					exception_message.c_str(),
					payload.c_str());
			}
			else
			{
				Logger::getLogger()->error(
					"HTTP sendRequest : retry count |%d| HTTP code |%d| HTTP error |%s| message |%s|",
					retry_count,
					http_code,
					response.c_str(),
					payload.c_str());
			}
#endif
			if (m_log && !exception_message.empty())
			{
				std::chrono::high_resolution_clock::time_point tEnd = std::chrono::high_resolution_clock::now();
				m_ofs << " Time: " << ((double)std::chrono::duration_cast<std::chrono::microseconds>(tEnd - tStart).count()) / 1.0E6 << " sec " << CurrentTimeString() << endl;
				m_ofs << " Exception: " << exception_message << endl;
			}

			if (retry_count < m_max_retry)
			{
				// Wait and try again, doubling the sleep at each attempt
				this_thread::sleep_for(chrono::seconds(sleep_time));
				retry = true;
				sleep_time *= 2;
				retry_count++;
			}
			else
			{
				retry = false;
			}
		}
	} while (retry);

	// Check if an error should be raised
	if (exception_raised == none)
	{
		// If 400 Bad Request, throw BadRequest exception
		if (http_code == 400)
		{
			throw BadRequest(response);
		}
		else if (http_code == 401)
		{
			throw Unauthorized(response);
		}
		else if (http_code == 409)
		{
			throw Conflict(response);
		}
		else if (http_code > 401)
		{
			std::stringstream error_message;
			error_message << "HTTP code |" << to_string(http_code) << "| HTTP error |" << response << "|";
			throw runtime_error(error_message.str());
		}
	}
	else
	{
		if (exception_raised == typeBadRequest)
		{
			throw BadRequest(exception_message);
		}
		else if (exception_raised == typeException)
		{
			throw runtime_error(exception_message);
		}
	}

	return http_code;
}


================================================
FILE: C/plugins/common/simple_https.cpp
================================================
/*
 * Fledge HTTP Sender implementation using the
 * HTTPS Simple Web Server library
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto, Mark Riddoch
 */
#include <simple_https.h>
#include <thread>
#include <logger.h>
#include <string_utils.h>

#define VERBOSE_LOG 0

using namespace std;

/**
 * Creates a UTC time string for the current time
 *
 * @return Current UTC time
 */
static std::string CurrentTimeString()
{
	time_t now = time(NULL);
	struct tm timeinfo;
	gmtime_r(&now, &timeinfo);
	char timeString[20];
	strftime(timeString, sizeof(timeString), "%F %T", &timeinfo);
	return
std::string(timeString);
}

// Using https://github.com/eidheim/Simple-Web-Server
using HttpsClient = SimpleWeb::Client<SimpleWeb::HTTPS>;

/**
 * Constructor: host:port, connect_timeout, request_timeout
 */
SimpleHttps::SimpleHttps(const string& host_port,
			 unsigned int connect_timeout,
			 unsigned int request_timeout,
			 unsigned int retry_sleep_Time,
			 unsigned int max_retry) :
			 HttpSender(),
			 m_host_port(host_port),
			 m_retry_sleep_time(retry_sleep_Time),
			 m_max_retry (max_retry)
{
	// Passing false to second parameter avoids certificate verification
	m_sender = new HttpsClient(host_port, false);

	m_sender->config.timeout = (time_t)request_timeout;
	m_sender->config.timeout_connect = (time_t)connect_timeout;

	setTrace();
}

/**
 * Destructor
 */
SimpleHttps::~SimpleHttps()
{
	resetTrace();
	delete m_sender;
}

/**
 * @brief Configures logging for the HTTP sender.
 */
void SimpleHttps::setTrace()
{
	// Check if the log file is writable; tracing is enabled only when it is
	std::string tracePath = HttpSender::getOMFTracePath();
	if (access(tracePath.c_str(), W_OK) == 0)
	{
		m_log = true;
		m_ofs.open(tracePath.c_str(), ofstream::app);
	}
	else
	{
		m_log = false;
	}
}

/**
 * @brief Resets the logging state for the HTTP sender.
 */
void SimpleHttps::resetTrace()
{
	if (m_log)
	{
		m_ofs.close(); // Close the log file if it is open
	}
}

/**
 * Set a proxy server
 *
 * @param proxy The name and port of the proxy server
 */
void SimpleHttps::setProxy(const string& proxy)
{
	m_sender->config.proxy_server = proxy;
}

/**
 * Send data, it retries the operation m_max_retry times
 * waiting m_retry_sleep_time*2 at each attempt
 *
 * @param method    The HTTP method (GET, POST, ...)
 * @param path      The URL path
 * @param headers   The optional headers to send
 * @param payload   The optional data payload (for POST, PUT)
 * @return          The HTTP code for the cases : 1xx Informational / 2xx Success / 3xx Redirection
 * @throw           BadRequest for HTTP 400 error
 *                  std::exception as generic exception for all the cases >= 401 Client errors / 5xx Server errors
 */
int SimpleHttps::sendRequest(
		const string& method,
		const string& path,
		const vector<pair<string, string>>& headers,
		const string& payload
)
{
	SimpleWeb::CaseInsensitiveMultimap header;

	// Add Fledge UserAgent
	header.emplace("User-Agent", HTTP_SENDER_USER_AGENT);

	// To let PI Web API having Cross-Site Request Forgery (CSRF) enabled as by default configuration
	header.emplace("X-Requested-With", "XMLHttpRequest");

	// Add custom headers
	for (auto it = headers.begin(); it != headers.end(); ++it)
	{
		header.emplace((*it).first, (*it).second);
	}

	// Handle authentication: basic auth when selected, otherwise an OCS
	// bearer token when one has been set
	if (m_authMethod == "b")
	{
		header.emplace("Authorization", "Basic " + m_authBasicCredentials);
	}
	else if (m_OCSToken.compare("") != 0)
	{
		header.emplace("Authorization", "Bearer " + m_OCSToken);
	}

	string retCode;
	string response;
	int http_code;
	bool retry = false;
	int retry_count = 1;
	int sleep_time = m_retry_sleep_time;

	enum exceptionType
	{
		none,
		typeBadRequest,
		typeException
	};
	exceptionType exception_raised;
	string exception_message;

	// Retry loop: sleep_time doubles at each failed attempt
	do
	{
		std::chrono::high_resolution_clock::time_point tStart;
		try
		{
			exception_raised = none;
			http_code = 0;

			if (m_log)
			{
				// Trace the outgoing request
				m_ofs << endl << method << " " << path << endl;
				m_ofs << "Headers:" << endl;
				for (auto it = header.begin(); it != header.end(); it++)
				{
					m_ofs << " " << it->first << ": " << it->second << endl;
				}
				m_ofs << "Payload:" << endl;
				m_ofs << payload << endl;
				tStart = std::chrono::high_resolution_clock::now();
			}

			// Call HTTPS method
			auto res = m_sender->request(method, path, payload, header);
			retCode = res->status_code;
			response = res->content.string();
			m_HTTPResponse = response;

			if (m_log)
			{
				// Trace the response and the elapsed time
				std::chrono::high_resolution_clock::time_point tEnd = std::chrono::high_resolution_clock::now();
				m_ofs << "Response:" << endl;
				m_ofs << " Code: " << res->status_code << endl;
				m_ofs << " Time: " << ((double)std::chrono::duration_cast<std::chrono::microseconds>(tEnd - tStart).count()) / 1.0E6 << " sec " << CurrentTimeString() << endl;
				m_ofs << " Content: " << res->content.string() << endl << endl;
			}

			// In same cases the response is an empty string
			// and retCode contains code and the description
			if (response.compare("") == 0)
				response = res->status_code;

			http_code = atoi(retCode.c_str());
		}
		catch (BadRequest &ex)
		{
			exception_raised = typeBadRequest;
			exception_message = ex.what();
		}
		catch (exception &ex)
		{
			exception_raised = typeException;
			exception_message = "Failed to send data: ";
			exception_message.append(ex.what());
		}

		if (exception_raised == none &&
		    ((http_code >= 200) && (http_code <= 399)))
		{
			retry = false;
#if VERBOSE_LOG
			Logger::getLogger()->info("HTTPS sendRequest succeeded : retry count |%d| HTTP code |%d| message |%s|",
						  retry_count,
						  http_code,
						  payload.c_str());
#endif
		}
		else
		{
#if VERBOSE_LOG
			if (exception_raised)
			{
				Logger::getLogger()->error(
					"HTTPS sendRequest : retry count |%d| error |%s| message |%s|",
					retry_count,
					exception_message.c_str(),
					payload.c_str());
			}
			else
			{
				Logger::getLogger()->error(
					"HTTPS sendRequest : retry count |%d| HTTP code |%d| HTTP error |%s| message |%s|",
					retry_count,
					http_code,
					response.c_str(),
					payload.c_str());
			}
#endif
			if (m_log && !exception_message.empty())
			{
				std::chrono::high_resolution_clock::time_point tEnd = std::chrono::high_resolution_clock::now();
				m_ofs << " Time: " << ((double)std::chrono::duration_cast<std::chrono::microseconds>(tEnd - tStart).count()) / 1.0E6 << " sec " << CurrentTimeString() << endl;
				m_ofs << " Exception: " << exception_message << endl;
			}

			if (retry_count < m_max_retry)
			{
				// Wait and try again, doubling the sleep at each attempt
				this_thread::sleep_for(chrono::seconds(sleep_time));
				retry = true;
				sleep_time *= 2;
				retry_count++;
			}
			else
			{
				retry = false;
			}
		}
	} while (retry);

	// Check if an error should be raised
	if (exception_raised == none)
	{
		// If 400 Bad Request, throw BadRequest exception
		if (http_code == 400)
		{
			throw BadRequest(response);
		}
		else if (http_code == 401)
		{
			throw Unauthorized(response);
		}
		else if (http_code == 409)
		{
			throw Conflict(response);
		}
		else if (http_code > 401)
		{
			std::stringstream error_message;

			StringReplace(response, "\r\n", "");
			error_message << "HTTP code |" << to_string(http_code) << "| HTTP error |" << response << "|";

			throw runtime_error(error_message.str());
		}
	}
	else
	{
		if (exception_raised == typeBadRequest)
		{
			throw BadRequest(exception_message);
		}
		else if (exception_raised == typeException)
		{
			throw runtime_error(exception_message);
		}
	}

	return http_code;
}


================================================
FILE: C/plugins/filter/common/CMakeLists.txt
================================================
cmake_minimum_required(VERSION 2.4.0)
project(filters-common-lib)

set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")

set(BOOST_COMPONENTS system thread)

# Late 2017 TODO: remove the following checks and always use std::regex
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
	if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
		set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex)
		set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_BOOST_REGEX")
	endif()
endif()

find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED)
include_directories(SYSTEM ${Boost_INCLUDE_DIR})

# Find source files
file(GLOB SOURCES *.cpp)

# Include header files
include_directories(include ../../../common/include ../../../services/common/include)
include_directories(../../../thirdparty/Simple-Web-Server)
include_directories(../../../thirdparty/rapidjson/include)

set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../../lib)

# Create shared library
add_library(${PROJECT_NAME} SHARED ${SOURCES} ${EXTRA_SOURCES})
target_link_libraries(${PROJECT_NAME} ${Boost_LIBRARIES})
set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib) ================================================ FILE: C/plugins/filter/common/filter.cpp ================================================ /* * Fledge base FledgeFilter class * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <filter.h> using namespace std; /** * FledgeFilter constructor * * This class or a derived one has to be used * as return object from Fledge filters C interface "plugin_init"A * * @param filterName The filter plugin name * @param filterConfig The filter plugin configuration * @param outHandle A handle passed to the filter output stream function * @param output The The output stream function pointer */ FledgeFilter::FledgeFilter(const string& filterName, ConfigCategory& filterConfig, OUTPUT_HANDLE *outHandle, OUTPUT_STREAM output) : m_name(filterName), m_config(filterConfig), m_enabled(false) { m_data = outHandle; m_func = output; // Set the enable flag if (m_config.itemExists("enable")) { m_enabled = m_config.getValue("enable").compare("true") == 0 || m_config.getValue("enable").compare("True") == 0; } } /** * Set a new configurartion for this plugin * * @param newConfig The new configuration */ void FledgeFilter::setConfig(const string& newConfig) { m_config = ConfigCategory(m_name, newConfig); m_enabled = m_config.getValue("enable").compare("true") == 0 || m_config.getValue("enable").compare("True") == 0; } ================================================ FILE: C/plugins/filter/common/include/filter.h ================================================ #ifndef _FLEDGE_FITER_H #define _FLEDGE_FITER_H /* * Fledge base FledgeFilter class * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <string> #include <config_category.h> #include <filter_plugin.h> class 
FledgeFilter{ public: FledgeFilter(const std::string& filterName, ConfigCategory& filterConfig, OUTPUT_HANDLE *outHandle, OUTPUT_STREAM output); ~FledgeFilter() {}; const std::string& getName() const { return m_name; }; bool isEnabled() const { return m_enabled; }; ConfigCategory& getConfig() { return m_config; }; void disableFilter() { m_enabled = false; }; void setConfig(const std::string& newConfig); public: OUTPUT_HANDLE* m_data; OUTPUT_STREAM m_func; protected: std::string m_name; ConfigCategory m_config; bool m_enabled; }; #endif ================================================ FILE: C/plugins/north/OMF/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(OMF) set(CMAKE_CXX_FLAGS "-std=c++11 -O3") set_source_files_properties(version.h PROPERTIES GENERATED TRUE) add_custom_command( OUTPUT version.h DEPENDS ${CMAKE_SOURCE_DIR}/VERSION COMMAND ${CMAKE_SOURCE_DIR}/mkversion ${CMAKE_SOURCE_DIR} COMMENT "Generating version header" VERBATIM ) include_directories(${CMAKE_BINARY_DIR}) # Add here all needed Fledge libraries as list set(NEEDED_FLEDGE_LIBS common-lib plugins-common-lib) set(COMMON_LIBS -lssl -lcrypto) # Find source files file(GLOB SOURCES *.cpp) # Include header files include_directories(include) include_directories(../../../services/common/include) include_directories(../../../common/include) include_directories(../../../plugins/common/include) include_directories(../../../thirdparty/Simple-Web-Server) include_directories(../../../thirdparty/rapidjson/include) link_directories(${PROJECT_BINARY_DIR}/../../../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES} version.h) target_link_libraries(${PROJECT_NAME} ${NEEDED_FLEDGE_LIBS}) target_link_libraries(${PROJECT_NAME} ${COMMON_LIBS}) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/plugins/north/${PROJECT_NAME}) 
================================================ FILE: C/plugins/north/OMF/OMFError.cpp ================================================ /* * Fledge OSIsoft OMF interface to PI Server. * * Copyright (c) 2023 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <utility> #include <iostream> #include <string> #include <cstring> #include <logger.h> #include "string_utils.h" #include <iterator> #include <typeinfo> #include <algorithm> #include <omferror.h> #include <rapidjson/error/en.h> #include <stdio.h> using namespace std; using namespace rapidjson; /** * Constructors */ OMFError::OMFError() : m_hasErrors(false) { } OMFError::OMFError(const string& json) { setFromHttpResponse(json); } /** * Destructor for the error class */ OMFError::~OMFError() { } /** * Parse error information from an OMF POST response JSON document * * @param json JSON response document from an OMF POST */ void OMFError::setFromHttpResponse(const string& json) { m_messages.clear(); m_hasErrors = false; char *p = (char *)json.c_str(); FILE *fp = fopen("/tmp/error", "a"); fprintf(fp, "%s\n\n", p); fclose(fp); while (*p && *p != '{') p++; Document doc; if (doc.ParseInsitu(p).HasParseError()) { Logger::getLogger()->error("Unable to parse response from OMF endpoint: %s", GetParseError_En(doc.GetParseError())); Logger::getLogger()->error("Error response was: %s", json.c_str()); } else if (doc.HasMember("Messages") && doc["Messages"].IsArray()) { const Value& messages = doc["Messages"].GetArray(); for (Value::ConstValueIterator a = messages.Begin(); a != messages.End(); a++) { const Value& msg = *a; int httpCode = 200; if (msg.HasMember("Status") && msg["Status"].IsObject()) { const Value& status = msg["Status"]; if (status.HasMember("Code") && status["Code"].IsInt()) { httpCode = status["Code"].GetInt(); } } if (msg.HasMember("Events") && msg["Events"].IsArray()) { const Value& events = msg["Events"]; for (Value::ConstValueIterator b = events.Begin(); b != 
events.End(); b++) { const Value &event = *b; string message, reason, severity, exceptionType, exceptionMessage; // ExceptionInfo can appear inside an Event object // or inside an Event->InnerEvents object if (event.HasMember("ExceptionInfo") && event["ExceptionInfo"].IsObject()) { const Value &exceptionInfo = event["ExceptionInfo"]; if (exceptionInfo.HasMember("Type") && exceptionInfo["Type"].IsString()) { exceptionType = exceptionInfo["Type"].GetString(); } if (exceptionInfo.HasMember("Message") && exceptionInfo["Message"].IsString()) { exceptionMessage = exceptionInfo["Message"].GetString(); std::string crlf(2, '\r'); crlf[1] = '\n'; StringReplaceAll(exceptionMessage, crlf, " "); exceptionMessage = StringStripWhiteSpacesExtra(exceptionMessage); } } else if (event.HasMember("InnerEvents") && event["InnerEvents"].IsArray()) { rapidjson::GenericArray<true, rapidjson::Value> innerEvents = event["InnerEvents"].GetArray(); if (innerEvents.Size() > 0) { const Value &innerEvent = innerEvents[0]; if (innerEvent.HasMember("ExceptionInfo") && innerEvent["ExceptionInfo"].IsObject()) { const Value &exceptionInfo = innerEvent["ExceptionInfo"]; if (exceptionInfo.HasMember("Type") && exceptionInfo["Type"].IsString()) { exceptionType = exceptionInfo["Type"].GetString(); } if (exceptionInfo.HasMember("Message") && exceptionInfo["Message"].IsString()) { exceptionMessage = exceptionInfo["Message"].GetString(); std::string crlf(2, '\r'); crlf[1] = '\n'; StringReplaceAll(exceptionMessage, crlf, " "); exceptionMessage = StringStripWhiteSpacesExtra(exceptionMessage); } } } } if (event.HasMember("Severity") && event["Severity"].IsString()) { severity = event["Severity"].GetString(); if (severity.compare("Error") == 0) { m_hasErrors = true; } } if (event.HasMember("EventInfo") && event["EventInfo"].IsObject()) { const Value& eventInfo = event["EventInfo"]; if (eventInfo.HasMember("Message") && eventInfo["Message"].IsString()) { message = eventInfo["Message"].GetString(); } if 
(eventInfo.HasMember("Reason") && eventInfo["Reason"].IsString()) { reason = eventInfo["Reason"].GetString(); } } m_messages.push_back(Message(severity, message, reason, exceptionType, exceptionMessage, httpCode)); } } } } } /** * Return the most severe HTTP Code from all messages. * PI Web API HTTP Codes are usually the same within one HTTP response. * * @return HTTP Code */ int OMFError::getHttpCode() { int httpCode = 200; for (Message &msg : m_messages) { if (msg.getHttpCode() > httpCode) { httpCode = msg.getHttpCode(); } } return httpCode; } /** * Return the error message for the given message * * @param offset The error within the report to return * @return string The event message */ string OMFError::getMessage(unsigned int offset) { string rval; if (offset < messageCount()) { rval = m_messages[offset].getMessage(); } return rval; } /** * Return the error reason for the given message * * @param offset The error within the report to return * @return string The event reason */ string OMFError::getEventReason(unsigned int offset) { string rval; if (offset < messageCount()) { rval = m_messages[offset].getReason(); } return rval; } /** * Get the event severity for a given message * * @param offset The message to examine * @return string The event severity */ string OMFError::getEventSeverity(unsigned int offset) { string rval; if (offset < messageCount()) { rval = m_messages[offset].getSeverity(); } return rval; } /** * Get the event exception type for a given message * * @param offset The message to examine * @return string The event exception type */ string OMFError::getEventExceptionType(unsigned int offset) { string rval; if (offset < messageCount()) { rval = m_messages[offset].getExceptionType(); } return rval; } /** * Get the event exception message for a given message * * @param offset The message to examine * @return string The event exception message */ string OMFError::getEventExceptionMessage(unsigned int offset) { string rval; if (offset < 
messageCount())
	{
		rval = m_messages[offset].getExceptionMessage();
	}

	return rval;
}

/**
 * Log all available messages
 *
 * @param mainMessage		Top-level message when reporting an error
 * @param filterDuplicates	If true, do not log duplicate messages
 * @return			True if OMFError object holds at least one message
 */
bool OMFError::Log(const std::string &mainMessage, bool filterDuplicates)
{
	if (hasMessages())
	{
		// Summary line first; logged at error level only if at least one
		// message in the response had severity "Error"
		if (hasErrors())
		{
			Logger::getLogger()->error("HTTP %d: %s: %u %s",
						getHttpCode(),
						mainMessage.c_str(),
						messageCount(),
						(messageCount() == 1) ? "message" : "messages");
		}
		else
		{
			Logger::getLogger()->warn("HTTP %d: %s: %u %s",
						getHttpCode(),
						mainMessage.c_str(),
						messageCount(),
						(messageCount() == 1) ? "message" : "messages");
		}

		// Remember the last logged message/exception pair: duplicate
		// filtering is against the previously logged entry only, so it
		// suppresses consecutive repeats, not duplicates anywhere in the set
		std::string lastMessage;
		std::string lastExceptionMessage;
		unsigned int numDuplicates = 0;
		for (unsigned int i = 0; i < messageCount(); i++)
		{
			Message &msg = m_messages[i];
			std::string errorMessage = msg.getMessage();
			std::string exceptionMessage = msg.getExceptionMessage();

			if (filterDuplicates && (0 == errorMessage.compare(lastMessage)) && (0 == exceptionMessage.compare(lastExceptionMessage)))
			{
				numDuplicates++;
			}
			else
			{
				// Per-message line at a level matching the message severity
				if (msg.getSeverity().compare("Error") == 0)
				{
					Logger::getLogger()->error("Message %u HTTP %d: %s, %s, %s",
							i,
							msg.getHttpCode(),
							msg.getSeverity().c_str(),
							errorMessage.c_str(),
							msg.getReason().c_str());
				}
				else
				{
					Logger::getLogger()->warn("Message %u HTTP %d: %s, %s, %s",
							i,
							msg.getHttpCode(),
							msg.getSeverity().c_str(),
							errorMessage.c_str(),
							msg.getReason().c_str());
				}

				// Log the exception detail only when it adds information
				// beyond the event message itself
				if (!exceptionMessage.empty() && (0 != errorMessage.compare(exceptionMessage)))
				{
					Logger::getLogger()->error("Message %u Exception: %s (%s)",
							i,
							exceptionMessage.c_str(),
							msg.getExceptionType().c_str());
				}

				lastMessage = errorMessage;
				lastExceptionMessage = exceptionMessage;
			}
		}

		if (numDuplicates > 0)
		{
			Logger::getLogger()->warn("%u duplicate messages skipped", numDuplicates);
		}
	}

	return hasMessages();
}


================================================
FILE:
C/plugins/north/OMF/include/OMFHint.h ================================================ #ifndef _OMF_HINT_H #define _OMF_HINT_H #include <rapidjson/document.h> /** * Virtual base class for an OMF Hint */ class OMFHint { public: virtual ~OMFHint() = default; const std::string& getHint() const { return m_hint; }; protected: std::string m_hint; }; /** * A number hint, defines how number type should be defined, float64 or float32 */ class OMFNumberHint : public OMFHint { public: OMFNumberHint(const std::string& type) { m_hint = type; }; ~OMFNumberHint() {}; }; /** * A integer hint, defines how integer type should be defined, int64, int32 or int16 */ class OMFIntegerHint : public OMFHint { public: OMFIntegerHint(const std::string& type) { m_hint = type; }; ~OMFIntegerHint() {}; }; /** * A tag hint, used to define an existing OMF container or tag to use */ class OMFTagHint : public OMFHint { public: OMFTagHint(const std::string& tag) { m_hint = tag; }; ~OMFTagHint() {}; }; /** * A Type name hint, tells us how to name the types we use */ class OMFTypeNameHint : public OMFHint { public: OMFTypeNameHint(const std::string& name) { m_hint = name; }; ~OMFTypeNameHint() {}; }; /** * A tag name hint, tells us which Container name to use in PI */ class OMFTagNameHint : public OMFHint { public: OMFTagNameHint(const std::string& name) { m_hint = name; }; ~OMFTagNameHint() {}; }; /** * A tag name hint, defines which PI Tag to use for a Datapoint */ class OMFTagNameDatapointHint : public OMFHint { public: OMFTagNameDatapointHint(const std::string &name) { m_hint = name; }; ~OMFTagNameDatapointHint() {}; }; /** * A AFLocation hint, tells us in which Asset Framework hierarchy the asset should be created */ class OMFAFLocationHint : public OMFHint { public: OMFAFLocationHint(const std::string& name) { m_hint = name; }; ~OMFAFLocationHint() {}; }; /** * A Legacy type hint, tells the OMF plugin to send complex types for this asset */ class OMFLegacyTypeHint : public OMFHint { public: 
OMFLegacyTypeHint(const std::string& name) { m_hint = name; }; ~OMFLegacyTypeHint() {}; }; /** * A Source hint, defines the data source for the asset or datapoint */ class OMFSourceHint : public OMFHint { public: OMFSourceHint(const std::string& name) { m_hint = name; }; ~OMFSourceHint() {}; }; /** * A unit of measurement hint, defines the unit of measurement for a datapoint */ class OMFUOMHint : public OMFHint { public: OMFUOMHint(const std::string& name) { m_hint = name; }; ~OMFUOMHint() {}; }; /** * A minimum hint, defines the minimum value for a property */ class OMFMinimumHint : public OMFHint { public: OMFMinimumHint(const std::string& name) { m_hint = name; }; ~OMFMinimumHint() {}; }; /** * A maximum hint, defines the maximum value for a property */ class OMFMaximumHint : public OMFHint { public: OMFMaximumHint(const std::string& name) { m_hint = name; }; ~OMFMaximumHint() {}; }; /** * A interpolation hint, defines the interpolation value for a property */ class OMFInterpolationHint : public OMFHint { public: OMFInterpolationHint(const std::string& name) { m_hint = name; }; ~OMFInterpolationHint() {}; }; /** * A set of hints for a reading * * documentation available at: * https://fledge-iot.readthedocs.io/en/latest/plugins/fledge-filter-omfhint/index.html * https://fledge-iot.readthedocs.io/en/latest/OMF.html#omf-hints * */ class OMFHints { public: OMFHints(const std::string& hints); ~OMFHints(); const std::vector<OMFHint *>& getHints() const { return m_hints; }; const std::vector<OMFHint *>& getHints(const std::string&) const; const unsigned short getChecksum() { return m_chksum; }; static string getHintForChecksum(const string &hint); private: rapidjson::Document m_doc; unsigned short m_chksum; std::vector<OMFHint *> m_hints; std::map<std::string, std::vector<OMFHint *> > m_datapointHints; }; #endif ================================================ FILE: C/plugins/north/OMF/include/basetypes.h ================================================ #ifndef 
_BASETYPES_H #define _BASETYPES_H /* * Fledge OSIsoft OMF interface to PI Server. * * Copyright (c) 2022 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <plugin_api.h> static const char *baseOMFTypes = QUOTE( [ { "id":"Double64", "type":"object", "classification":"dynamic", "properties":{ "Double64":{ "type":["number", "null"], "format":"float64" }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } }, { "id":"Double32", "type":"object", "classification":"dynamic", "properties":{ "Double32":{ "type":["number", "null"], "format":"float32" }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } }, { "id":"Integer16", "type":"object", "classification":"dynamic", "properties":{ "Integer16":{ "type":["integer","null"], "format":"int16" }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } }, { "id":"Integer32", "type":"object", "classification":"dynamic", "properties":{ "Integer32":{ "type":["integer","null"], "format":"int32" }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } }, { "id":"Integer64", "type":"object", "classification":"dynamic", "properties":{ "Integer64":{ "type":["integer","null"], "format":"int64" }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } }, { "id":"UInteger16", "type":"object", "classification":"dynamic", "properties":{ "UInteger16":{ "type":["integer","null"], "format":"uint16" }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } }, { "id":"UInteger32", "type":"object", "classification":"dynamic", "properties":{ "UInteger32":{ "type":["integer","null"], "format":"uint32" }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } }, { "id":"UInteger64", "type":"object", "classification":"dynamic", "properties":{ "UInteger64":{ "type":["integer","null"], "format":"uint64" }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } }, { "id":"String", "type":"object", 
"classification":"dynamic", "properties":{ "String":{ "type":["string","null"] }, "Time":{ "type":"string", "format":"date-time", "isindex":true } } } ]); #endif ================================================ FILE: C/plugins/north/OMF/include/linkedlookup.h ================================================ #ifndef _LINKEDLOOKUP_H #define _LINKEDLOOKUP_H typedef enum { OMFBT_UNKNOWN, OMFBT_DOUBLE64, OMFBT_DOUBLE32, OMFBT_INTEGER16, OMFBT_INTEGER32, OMFBT_INTEGER64, OMFBT_UINTEGER16, OMFBT_UINTEGER32, OMFBT_UINTEGER64, OMFBT_STRING, OMFBT_FLEDGEASSET } OMFBaseType; /** * Lookup status bit */ #define LAL_ASSET_SENT 0x01 // We have sent the asset #define LAL_LINK_SENT 0x02 // We have sent the link to the base type #define LAL_CONTAINER_SENT 0x04 // We have sent the container #define LAL_AFLINK_SENT 0x08 // We have sent the link to the AF location /** * Linked Asset Information class * * This is the data stored for each asset and asset datapoint pair that * is being sent to PI using the linked container mechanism. We use the class * so we can combine all the information we need in a single lookup table, * this not only saves space but allows to build and retain the table * before we start building the payloads. This hopefully will help prevent * to much memory fragmentation, which was an issue with the old, separate * lookup mechanism we had. 
*/
class LALookup {
	public:
		LALookup()
		{
			m_sentState = 0;
			m_baseType = OMFBT_UNKNOWN;
		};
		// True only if the asset was sent under this exact tag name;
		// a tag name change invalidates the sent state
		bool	assetState(const std::string& tagName)
			{
				return ((m_sentState & LAL_ASSET_SENT) != 0)
					&& (m_tagName.compare(tagName) == 0);
			};
		// True only if the link to the base type was sent for this tag name
		bool	linkState(const std::string& tagName)
			{
				return ((m_sentState & LAL_LINK_SENT) != 0)
					&& (m_tagName.compare(tagName) == 0);
			};
		// True only if the container was sent for this tag name
		bool	containerState(const std::string& tagName)
			{
				return ((m_sentState & LAL_CONTAINER_SENT) != 0)
					&& (m_tagName.compare(tagName) == 0);
			};
		// True if the link to the AF location was sent (not tag-specific)
		bool	afLinkState()
			{
				return (m_sentState & LAL_AFLINK_SENT) != 0;
			};
		void	setBaseType(const std::string& baseType);
		OMFBaseType
			getBaseType() { return m_baseType; };
		std::string
			getBaseTypeString();
		// Record that the asset was sent. If the tag name changed, the
		// state is RESET to just LAL_ASSET_SENT (all other sent flags are
		// deliberately cleared) and the new tag name is stored.
		void	assetSent(const std::string& tagName)
			{
				if (m_tagName.compare(tagName))
				{
					m_sentState = LAL_ASSET_SENT;
					m_tagName = tagName;
				}
				else
				{
					m_sentState |= LAL_ASSET_SENT;
				}
			};
		// Record that the link was sent; a tag name change forces the
		// container to be resent by clearing LAL_CONTAINER_SENT
		void	linkSent(const std::string& tagName)
			{
				if (m_tagName.compare(tagName))
				{
					// Force the container to resend if the tagName changes
					m_tagName = tagName;
					m_sentState &= ~LAL_CONTAINER_SENT;
				}
				m_sentState |= LAL_LINK_SENT;
			};
		// Record that the AF location link was sent
		void	afLinkSent() { m_sentState |= LAL_AFLINK_SENT; };
		void	containerSent(const std::string& tagName,
					const std::string& baseType);
		void	containerSent(const std::string& tagName,
					OMFBaseType baseType);
	private:
		uint8_t		m_sentState;	// Bitmask of LAL_*_SENT flags
		OMFBaseType	m_baseType;	// OMF base type for this datapoint
		std::string	m_tagName;	// Tag name the state was recorded against
};
#endif


================================================
FILE: C/plugins/north/OMF/include/ocs.h
================================================
#ifndef _OCS_H
#define _OCS_H
/*
 * Fledge OSIsoft ADH and OCS integration.
* * Copyright (c) 2020-2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ #include <string> #include <chrono> using namespace std; #define TIMEOUT_CONNECT 10 #define TIMEOUT_REQUEST 10 #define RETRY_SLEEP_TIME 1 #define URL_RETRIEVE_TOKEN "/identity/connect/token" #define PAYLOAD_RETRIEVE_TOKEN "grant_type=client_credentials&client_id=CLIENT_ID_PLACEHOLDER&client_secret=CLIENT_SECRET_ID_PLACEHOLDER" /** * The OCS class. */ class OCS { public: OCS(const std::string &authorizationUrl); // Destructor ~OCS(); std::string OCSRetrieveAuthToken(const string& clientId, const string& clientSecret, bool logMessage = true); int retrieveToken(const string& clientId, const string& clientSecret, bool logMessage = true); void extractToken(const string& response); private: std::string m_token; std::string m_authUrl; unsigned int m_expiresIn; std::chrono::steady_clock::time_point m_nextAuthentication; }; #endif ================================================ FILE: C/plugins/north/OMF/include/omf.h ================================================ #ifndef _OMF_H #define _OMF_H /* * Fledge OSIsoft OMF interface to PI Server. * * Copyright (c) 2018-2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <string> #include <vector> #include <map> #include <unordered_map> #include <reading.h> #include <http_sender.h> #include <zlib.h> #include <rapidjson/document.h> #include <omfbuffer.h> #include <omferror.h> #include <linkedlookup.h> #define OMF_HINT "OMFHint" #define PIWEBAPI_PIPOINTS_NOT_CREATED "One or more PI Points could not be created." #define PIWEBAPI_CONTAINER_NOT_FOUND "Container not found." #define PIWEBAPI_UPDATE_EXCEPTION "An exception occurred while updating data." 
#define MESSAGE_PI_UNSTABLE "HTTP Code %d: Processing cannot continue until data archive errors are corrected" #define MESSAGE_UNAUTHORIZED "OMF endpoint reported we are not authorized, please check configuration of the authentication method and credentials" const char *const noConnectionErrorMessages[] = {"Failed to send data: Operation canceled", // PI Web API "Failed to send data: Connection refused", // Edge Data Store "Failed to send data: Host not found", // usually followed by "(authoritative)" or "(non-authoritative), try again later" "Failed to send data: Network is unreachable", ""}; // empty string marks the end of the array // The following will force the OMF version for EDS endpoints // Remove or comment out the line below to prevent the forcing // of the version #define EDS_OMF_VERSION "1.0" #define CR_OMF_VERSION "1.0" #define TYPE_ID_DEFAULT 1 #define FAKE_ASSET_KEY "_default_start_id_" #define OMF_TYPE_STRING "string" #define OMF_TYPE_INTEGER "integer" #define OMF_TYPE_FLOAT "number" #define OMF_TYPE_UNSUPPORTED "unsupported" enum OMF_ENDPOINT { ENDPOINT_PIWEB_API, ENDPOINT_CR, ENDPOINT_OCS, ENDPOINT_EDS, ENDPOINT_ADH }; // Documentation about the Naming Scheme available at: https://fledge-iot.readthedocs.io/en/latest/OMF.html#naming-scheme enum NAMINGSCHEME_ENDPOINT { NAMINGSCHEME_CONCISE, NAMINGSCHEME_SUFFIX, NAMINGSCHEME_HASH, NAMINGSCHEME_COMPATIBILITY }; using namespace std; using namespace rapidjson; std::string ApplyPIServerNamingRules(const std::string &objName, bool *changed); std::string DataPointNamesAsString(const Reading& reading); /** * Per asset dataTypes - This class is used in a std::map where assetName is a key * * - typeId = id of the type, it is incremented if the type is redefined * - types = is a JSON string with datapoint names and types * - typesShort = a numeric representation of the type used to quickly identify if a type has changed * - namingScheme = Naming schema of the asset, valid options are Concise, Backward 
compatibility ..
 * - afhHash		= Asset hash based on the AF hierarchy
 * - afHierarchy	= Current position of the asset in the AF hierarchy
 * - afHierarchyOrig	= Original position of the asset in the AF hierarchy, in case the asset was moved
 * - hintChkSum		= Checksum of the OMF hints
 */
class OMFDataTypes
{
	public:
		long		typeId;		// Type-id used when the OMF types for this asset were created
		std::string	types;		// JSON document holding the OMF type definitions sent for the asset
		unsigned long	typesShort;	// Compact count of datapoint types (see union t_typeCount below)
		long		namingScheme;	// Naming scheme in force when the types were created
		string		afhHash;	// Asset hash based on the AF hierarchy
		string		afHierarchy;	// Current position of the asset in the AF hierarchy
		string		afHierarchyOrig;// Original position of the asset in the AF hierarchy
		unsigned short	hintChkSum;	// Checksum of the OMF hints applied when types were created
};

class OMFHints;

/**
 * The OMF class.
 * Implements the OMF protocol
 */
class OMF
{
	public:
		/**
		 * Constructor:
		 * pass server URL path, OMF_type_id and producerToken.
		 */
		OMF(const std::string& name,
		    HttpSender& sender,
		    const std::string& path,
		    const long typeId,
		    const std::string& producerToken);

		// Constructor with per-asset data types cache passed in
		OMF(const std::string& name,
		    HttpSender& sender,
		    const std::string& path,
		    std::map<std::string, OMFDataTypes>& types,
		    const std::string& producerToken);

		// Destructor
		~OMF();

		// Record the negotiated OMF version; linked properties are only
		// available from OMF 1.2 onwards, so disable them for 1.0/1.1
		void setOMFVersion(std::string& omfversion)
		{
			m_OMFVersion = omfversion;
			if (omfversion.compare("1.0") == 0 ||
			    omfversion.compare("1.1") == 0)
			{
				m_linkedProperties = false;
			}
			else
			{
				m_linkedProperties = true;
			}
		};

		// NOTE(review): m_sender is a reference member, so this assigns
		// *through* the reference via HttpSender::operator= rather than
		// re-seating it — confirm this is the intended semantics
		void setSender(HttpSender& sender)
		{
			m_sender = sender;
		};

		/**
		 * Send data to PI Server passing a vector of readings.
		 *
		 * Data sending is composed by a few phases
		 * handled by private methods.
		 *
		 * Note: DataTypes are sent only once by using
		 * an in memory key map, being the key = assetName + typeId.
		 * Passing false to skipSentDataTypes changes the logic.
		 *
		 * Returns the number of processed readings.
		 */
		// Method with vector (by reference) of readings
		uint32_t sendToServer(const std::vector<Reading>& readings,
				      bool skipSentDataTypes = true); // never called

		// Method with vector (by reference) of reading pointers
		uint32_t sendToServer(const std::vector<Reading *>& readings,
				      bool compression, bool skipSentDataTypes = true);

		// Send a single reading (by reference)
		uint32_t sendToServer(const Reading& reading,
				      bool skipSentDataTypes = true); // never called

		// Send a single reading pointer
		uint32_t sendToServer(const Reading* reading,
				      bool skipSentDataTypes = true); // never called

		// Set saved OMF formats
		void setFormatType(const std::string &key, std::string &value);

		// Set which PIServer component should be used for the communication
		void setPIServerEndpoint(const OMF_ENDPOINT PIServerEndpoint);

		// Set the naming scheme of the objects in the endpoint
		void setNamingScheme(const NAMINGSCHEME_ENDPOINT namingScheme) {m_NamingScheme = namingScheme;};

		// Generate the container id for the given asset
		std::string generateMeasurementId(const string& assetName);

		// Generate a suffix for the given asset in relation to the selected naming schema and the value of the type id
		std::string generateSuffixType(string &assetName, long typeId);

		// Retrieve the naming scheme stored for the given asset
		long getNamingScheme(const string& assetName);

		// Accessors for the cached AF hierarchy state of an asset
		string getHashStored(const string& assetName);
		string getPathStored(const string& assetName);
		string getPathOrigStored(const string& assetName);
		bool setPathStored(const string& assetName, string &afHierarchy);

		// Delete/create the AF hierarchy entry for an asset
		bool deleteAssetAFH(const string& assetName, string& path);
		bool createAssetAFH(const string& assetName, string& path);

		// Set the first level of hierarchy in Asset Framework in which the assets will be created, PI Web API only.
		void setDefaultAFLocation(const std::string &DefaultAFLocation);

		bool setAFMap(const std::string &AFMap);

		void setSendFullStructure(const bool sendFullStructure) {m_sendFullStructure = sendFullStructure;};

		void setPrefixAFAsset(const std::string &prefixAFAsset);

		void setDelimiter(const std::string &delimiter) {m_delimiter = delimiter;};

		void setDataActionCode(const std::string &actionCode) {m_dataActionCode = actionCode;};

		// Get saved OMF formats
		std::string getFormatType(const std::string &key) const;

		// Set the list of errors considered not blocking
		// in the communication with the PI Server
		void setNotBlockingErrors(std::vector<std::string>& );

		// Compress string using gzip
		std::string compress_string(const std::string& str,
					    int compressionlevel = Z_DEFAULT_COMPRESSION);

		// Return current value of global type-id
		const long getTypeId() const { return m_typeId; };

		// Check DataTypeError
		bool isDataTypeError(const char* message);

		// Check if plugin configuration is working and PI is stable
		bool isPIstable() { return m_PIstable; };

		// Check if plugin is connected to PI
		bool isPIconnected() { return m_connected; };

		// Set PI connection status
		void setPIconnected(bool connectionStatus) { m_connected = connectionStatus; };

		// Get and Set number of blocks of Readings
		std::size_t getNumBlocks() { return m_numBlocks; };
		void setNumBlocks(std::size_t numBlocks) { m_numBlocks = numBlocks; };

		// Map object types found in input data
		void setMapObjectTypes(const std::vector<Reading *>& data,
				       std::map<std::string, Reading*>& dataSuperSet);
		// Removed mapped object types found in input data
		void unsetMapObjectTypes(std::map<std::string, Reading*>& dataSuperSet) const;

		void setStaticData(std::vector<std::pair<std::string, std::string>> *staticData) { m_staticData = staticData; };

		void generateAFHierarchyPrefixLevel(string& path, string& prefix, string& AFHierarchyLevel);

		// Retrieve private objects
		map<std::string, std::string> getNamesRules() const { return m_NamesRules; };
		map<std::string, std::string> getMetadataRulesExist() const { return m_MetadataRulesExist; };

		bool getAFMapEmptyNames() const { return m_AFMapEmptyNames; };
		bool getAFMapEmptyMetadata() const { return m_AFMapEmptyMetadata; };

		void setLegacyMode(bool legacy) { m_legacy = legacy; };

		// Static helpers to make names conform to PI Server naming rules
		static std::string ApplyPIServerNamingRulesObj(const std::string &objName, bool *changed);
		static std::string ApplyPIServerNamingRulesPath(const std::string &objName, bool *changed);
		static std::string ApplyPIServerNamingRulesInvalidChars(const std::string &objName, bool *changed);

		static std::string variableValueHandle(const Reading& reading, std::string &AFHierarchy);
		static bool extractVariable(string &strToHandle, string &variable, string &value, string &defaultValue);

		// Log a per-asset message once, avoiding repeated reports for the same asset
		static void reportAsset(const string& asset, const string& level, const string& msg);

	private:
		/**
		 * Builds the HTTP header to send
		 * messagetype header takes the passed type value:
		 * 'Type', 'Container', 'Data'
		 */
		const std::vector<std::pair<std::string, std::string>>
			createMessageHeader(const std::string& type, const std::string& action="create") const;

		// Create data for Type message for current row
		const std::string createTypeData(const Reading& reading, OMFHints *hints);

		// Create data for Container message for current row
		const std::string createContainerData(const Reading& reading, OMFHints *hints);

		// Create data for additional type message, with 'Data' for current row
		const std::string createStaticData(const Reading& reading);

		// Create data Link message, with 'Data', for current row
		std::string createLinkData(const Reading& reading,
					   std::string& AFHierarchyLevel,
					   std::string& prefix,
					   std::string& objectPrefix,
					   OMFHints *hints,
					   bool legacy);

		/**
		 * Create data for readings data content, with 'Data', for one row
		 * The new formatted data have to be added to a new JSON doc to send.
		 * we want to avoid sending of one data row
		 */
		const std::string createMessageData(Reading& reading);

		// Set the tagName in an assetName Type message
		void setAssetTypeTag(const std::string& assetName,
				     const std::string& tagName,
				     std::string& data);
		void setAssetTypeTagNew(const std::string& assetName,
				     const std::string& tagName,
				     std::string& data);

		// Create the OMF data types if needed
		bool handleDataTypes(const string keyComplete,
				     const Reading& row,
				     bool skipSendingTypes,
				     OMFHints *hints);

		// Send OMF data types
		bool sendDataTypes(const Reading& row, OMFHints *hints);

		// Get saved dataType
		bool getCreatedTypes(const std::string& keyComplete,
				     const Reading& row,
				     OMFHints *hints);

		// Calculate the compact types checksum for a reading (see t_typeCount)
		unsigned long calcTypeShort(const Reading& row);

		// Clear data types cache
		void clearCreatedTypes();

		// Increment type-id value
		void incrementTypeId();

		// Handle data type errors
		bool handleTypeErrors(const string& keyComplete, const Reading& reading, OMFHints*hints);

		string errorMessageHandler(const string &msg);

		void handleRESTException(const std::exception &e, const char *mainMessage);

		void CheckHttpCode(const int httpCode, const std::string &errorMessage);

		std::string getExceptionMessage(const std::exception &e, OMFError *error);

		// Extract assetName from error message
		std::string getAssetNameFromError(const char* message);

		// Get asset type-id from cached data
		long getAssetTypeId(const std::string& assetName);

		// Increment per asset type-id value
		void incrementAssetTypeId(const std::string& keyComplete);
		void incrementAssetTypeIdOnly(const std::string& keyComplete);

		// Set global type-id as the maximum value of all per asset type-ids
		void setTypeId();

		// Set saved dataType
		bool setCreatedTypes(const Reading& row, OMFHints *hints);

		// Remove cached data types entry for given asset name
		void clearCreatedTypes(const std::string& keyComplete);

		// Add the 1st level of AF hierarchy if the end point is PI Web API
		void setAFHierarchy();

		bool handleAFHierarchy();
		bool handleAFHierarchySystemWide();
		bool handleOmfHintHierarchies();

		bool sendAFHierarchy(std::string AFHierarchy);

		bool sendAFHierarchyLevels(std::string parentPath, std::string path, std::string &lastLevel);
		bool sendAFHierarchyTypes(const std::string AFHierarchyLevel, const std::string prefix);
		bool sendAFHierarchyStatic(const std::string AFHierarchyLevel, const std::string prefix);
		bool sendAFHierarchyLink(std::string parent, std::string child, std::string prefixIdParent, std::string prefixId);

		bool manageAFHierarchyLink(std::string parent, std::string child, std::string prefixIdParent, std::string prefixId, std::string childFull, string action);

		bool AFHierarchySendMessage(const std::string& msgType, std::string& jsonData, const std::string& action="create");

		std::string generateUniquePrefixId(const std::string &path);
		bool evaluateAFHierarchyRules(const string& assetName, const Reading& reading);
		void retrieveAFHierarchyPrefixAssetName(const string& assetName, string& prefix, string& AFHierarchyLevel);
		void retrieveAFHierarchyFullPrefixAssetName(const string& assetName, string& prefix, string& AFHierarchy);

		bool createAFHierarchyOmfHint(const string& assetName, const string &OmfHintHierarchy);

		bool HandleAFMapNames(Document& JSon);
		bool HandleAFMapMetedata(Document& JSon);

		// Start of support for using linked containers
		bool sendBaseTypes();
		bool sendFledgeAssetType();
		bool sendAFLinks(Reading& reading, OMFHints *hints);
		// End of support for using linked containers
		// string createAFLinks(Reading &reading, OMFHints *hints);

	private:
		// Use for the evaluation of the OMFDataTypes.typesShort
		union t_typeCount {
			struct
			{
				unsigned char tTotal;	// Total number of datapoints
				unsigned char tFloat;	// Number of float datapoints
				unsigned char tString;	// Number of string datapoints
				unsigned char spare0;
				unsigned char spare1;
				unsigned char spare2;
				unsigned char spare3;
				unsigned char spare4;
			} cnt;
			unsigned long valueLong = 0;
		};

		std::string		m_assetName;
		const std::string	m_path;
		long			m_typeId;
		const std::string	m_producerToken;
		OMF_ENDPOINT		m_PIServerEndpoint;
		NAMINGSCHEME_ENDPOINT	m_NamingScheme;
		std::string		m_DefaultAFLocation;
		bool			m_sendFullStructure;	// If disabled the AF hierarchy is not created.
		std::string		m_delimiter;
		std::string		m_dataActionCode;

		// Asset Framework Hierarchy Rules handling - Metadata MAP
		// Documentation: https://fledge-iot.readthedocs.io/en/latest/plugins/fledge-north-OMF/index.html?highlight=hierarchy#asset-framework-hierarchy-rules
		std::string		m_AFMap;
		bool			m_AFMapEmptyNames;	// true if there are no rules to manage
		bool			m_AFMapEmptyMetadata;
		std::string		m_AFHierarchyLevel;
		std::string		m_prefixAFAsset;

		vector<std::string>	m_afhHierarchyAlreadyCreated={
			// Asset Framework path
			// {""}
		};

		map<std::string, std::string>
					m_NamesRules={
			// Asset_name - Asset Framework path
			// {"", ""}
		};

		map<std::string, std::string>
					m_MetadataRulesExist={
			// Property - Asset Framework path
			// {"", ""}
		};

		map<std::string, std::string>
					m_MetadataRulesNonExist={
			// Property - Asset Framework path
			// {"", ""}
		};

		map<std::string, vector<pair<string, string>>>
					m_MetadataRulesEqual={
			// Property - Value - Asset Framework path
			// {"", {{"", ""}} }
		};

		map<std::string, vector<pair<string, string>>>
					m_MetadataRulesNotEqual={
			// Property - Value - Asset Framework path
			// {"", {{"", ""}} }
		};

		map<std::string, vector<pair<string, string>>>
					m_AssetNamePrefix ={
			// Property - Hierarchy - prefix
			// {"", {{"", ""}} }
		};

		// Define the OMF format to use for each type
		// the format will not be applied if the string is empty
		std::map<const std::string, std::string> m_formatTypes {
			{OMF_TYPE_STRING, ""},
			{OMF_TYPE_INTEGER,"int64"},
			{OMF_TYPE_FLOAT, "float64"},
			{OMF_TYPE_UNSUPPORTED, "unsupported"} };

		// Vector with OMF_TYPES
		const std::vector<std::string> omfTypes = { OMF_TYPE_STRING,
							    OMF_TYPE_FLOAT,  // Forces the creation of float also for integer numbers
							    OMF_TYPE_FLOAT,
							    OMF_TYPE_UNSUPPORTED};
		// HTTP Sender interface
		HttpSender&		m_sender;
		bool			m_changeTypeId;

		// These errors are considered not blocking in the communication
		// with the destination, the sending operation will proceed
		// with the next block of data if one of these is encountered
		std::vector<std::string> m_notBlockingErrors;

		// Data types cache[key] = (key_type_id, key data types)
		std::map<std::string, OMFDataTypes>* m_OMFDataTypes;

		// Stores the type for the block of data containing all the used properties
		std::map<string, Reading*> m_SuperSetDataPoints;

		/**
		 * Static data to send to OMF
		 */
		std::vector<std::pair<std::string, std::string>> *m_staticData;

		/**
		 * The version of OMF we are talking
		 */
		std::string		m_OMFVersion;

		/**
		 * Support sending properties via links
		 */
		bool			m_linkedProperties;

		/**
		 * The state of the linked assets, the key is
		 * either an asset name with an underscore appended
		 * or an asset name, followed by an underscore and a
		 * data point name
		 */
		std::unordered_map<std::string, LALookup>
					m_linkedAssetState;

		/**
		 * Force the data to be sent using the legacy, complex OMF types
		 */
		bool			m_legacy;

		/**
		 * Assets that have been logged as having errors. This prevents us
		 * from flooding the logs with reports for the same asset.
		 */
		static std::vector<std::string>
					m_reportedAssets;

		/**
		 * Service name
		 */
		const std::string	m_name;

		/**
		 * Have base types been sent to the PI Server
		 */
		bool			m_baseTypesSent;

		/**
		 * If true, plugin configuration is correct and the PI Server shows no errors
		 * If false, no Readings can be processed until PI is corrected and/or the configuration is updated.
		 */
		bool			m_PIstable;

		/**
		 * If true, plugin is connected to the PI Server
		 */
		bool			m_connected;

		/**
		 * Number of blocks of Readings from which to send Data at once
		 */
		std::size_t		m_numBlocks;
};

/**
 * The OMFData class.
 * A reading is formatted with OMF specifications using the original
 * type creation scheme implemented by the OMF plugin
 *
 * There is no good reason to retain this class any more, it is here
 * mostly to reduce the scope of the change when introducing the OMFBuffer
 */
class OMFData
{
	public:
		OMFData(OMFBuffer & payload,
			const Reading& reading,
			string measurementId,
			bool needDelim,
			const OMF_ENDPOINT PIServerEndpoint = ENDPOINT_CR,
			const std::string& DefaultAFLocation = std::string(),
			OMFHints *hints = NULL);
		// True if the constructor added any data to the payload buffer
		bool hasData() { return m_hasData; };
	private:
		bool	m_hasData;
};
#endif


================================================
FILE: C/plugins/north/OMF/include/omfbuffer.h
================================================
#ifndef _OMF_BUFFER_H
#define _OMF_BUFFER_H
/*
 * Fledge OMF North plugin buffer class
 *
 * Copyright (c) 2023 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <string>
#include <list>

#define BUFFER_CHUNK	8192

/**
 * Buffer class designed to hold OMF payloads that can
 * grow as required but have minimal copy semantics.
 *
 * TODO Add a coalesce and compress public entry point
 */
class OMFBuffer {
	// Internal chunk of the buffer chain
	class Buffer {
		public:
			Buffer();
			Buffer(unsigned int);
			~Buffer();
			// Hand ownership of the data pointer to the caller
			char		*detach();
			char		*data;
			unsigned int	offset;
			unsigned int	length;
			bool		attached;
	};

	public:
		OMFBuffer();
		~OMFBuffer();

		// Empty when no chunks exist or the single chunk has nothing written
		bool		isEmpty() { return buffers.empty() || (buffers.size() == 1 && buffers.front()->offset == 0); }
		void		append(const char);
		void		append(const char *);
		void		append(const int);
		void		append(const unsigned int);
		void		append(const long);
		void		append(const unsigned long);
		void		append(const double);
		void		append(const std::string&);
		// Append the string wrapped in double quotes
		void		quote(const std::string&);
		// Collapse all chunks into one contiguous C string
		const char	*coalesce();
		void		clear();

	private:
		std::list<Buffer *>	buffers;
};
#endif

================================================
FILE: C/plugins/north/OMF/include/omferror.h
================================================
#ifndef _OMFERROR_H
#define _OMFERROR_H
/*
 * Fledge OSIsoft OMF interface to PI Server.
 *
 * Copyright (c) 2023 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <rapidjson/document.h>
#include <string>
#include <vector>

/**
 * An encapsulation of an error return from an OMF call.
 * The class parses the JSON response and gives access to portion of that JSON response.
 */
class OMFError {
	public:
		OMFError();
		OMFError(const std::string& json);
		~OMFError();

		// NOTE(review): narrows size_t to unsigned int — fine for realistic
		// message counts, but the conversion is implicit
		unsigned int	messageCount() { return m_messages.size(); };
		std::string	getMessage(unsigned int offset);
		std::string	getEventReason(unsigned int offset);
		std::string	getEventSeverity(unsigned int offset);
		std::string	getEventExceptionType(unsigned int offset);
		std::string	getEventExceptionMessage(unsigned int offset);
		int		getHttpCode();
		void		setFromHttpResponse(const std::string& json);
		/**
		 * The error report contains at least one error level event
		 */
		bool		hasErrors() { return m_hasErrors; };
		/**
		 * The error report contains at least one message
		 */
		bool		hasMessages() { return !m_messages.empty(); };
		bool		Log(const std::string &mainMessage, bool filterDuplicates = true);
	private:
		// A single event/message parsed from the OMF error response
		class Message {
			public:
				Message(const std::string& severity,
					const std::string& message,
					const std::string& reason,
					const std::string& exceptionType,
					const std::string& exceptionMessage,
					const int httpCode) :
						m_severity(severity),
						m_message(message),
						m_reason(reason),
						m_exceptionType(exceptionType),
						m_exceptionMessage(exceptionMessage),
						m_httpCode(httpCode)
				{
				};
				std::string	getSeverity() { return m_severity; };
				std::string	getMessage() { return m_message; };
				std::string	getReason() { return m_reason; };
				std::string	getExceptionType() { return m_exceptionType; };
				std::string	getExceptionMessage() { return m_exceptionMessage; };
				int		getHttpCode() { return m_httpCode; };
			private:
				std::string	m_severity;
				std::string	m_message;
				std::string	m_reason;
				std::string	m_exceptionType;
				std::string	m_exceptionMessage;
				int		m_httpCode;
		};
		std::vector<Message>	m_messages;
		bool			m_hasErrors;
};
#endif

================================================
FILE: C/plugins/north/OMF/include/omfinfo.h
================================================
#ifndef _OMFINFO_H
#define _OMFINFO_H
/*
 * Fledge OSIsoft OMF interface to PI Server.
 *
 * Copyright (c) 2023-2025 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <plugin_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <string>
#include <logger.h>
#include <plugin_exception.h>
#include <iostream>
#include <omf.h>
#include <piwebapi.h>
#include <ocs.h>
#include <simple_https.h>
#include <simple_http.h>
#include <config_category.h>
#include "rapidjson/writer.h"
#include "rapidjson/stringbuffer.h"
#include "json_utils.h"
#include "libcurl_https.h"
#include "utils.h"
#include "string_utils.h"
#include <version.h>
#include <linkedlookup.h>
#include "crypto.hpp"

#define PLUGIN_NAME "OMF"
#define TYPE_ID_KEY "type-id"
#define SENT_TYPES_KEY "sentDataTypes"
#define DATA_KEY "dataTypes"
#define DATA_KEY_SHORT "dataTypesShort"
#define DATA_KEY_HINT "hintChecksum"
#define NAMING_SCHEME "namingScheme"
#define AFH_HASH "afhHash"
#define AF_HIERARCHY "afHierarchy"
#define AF_HIERARCHY_ORIG "afHierarchyOrig"

#define PROPERTY_TYPE "type"
#define PROPERTY_NUMBER "number"
#define PROPERTY_STRING "string"

// Endpoint URL templates; placeholders are substituted at configuration time
#define ENDPOINT_URL_PI_WEB_API "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/piwebapi/omf"
#define ENDPOINT_URL_CR "https://HOST_PLACEHOLDER:PORT_PLACEHOLDER/ingress/messages"
#define ENDPOINT_URL_OCS "https://REGION_PLACEHOLDER.osisoft.com:PORT_PLACEHOLDER/api/v1/tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf"
#define ENDPOINT_URL_ADH "https://REGION_PLACEHOLDER.datahub.connect.aveva.com:PORT_PLACEHOLDER/api/v1/Tenants/TENANT_ID_PLACEHOLDER/Namespaces/NAMESPACE_ID_PLACEHOLDER/omf"
#define ENDPOINT_URL_EDS "http://localhost:PORT_PLACEHOLDER/api/v1/tenants/default/namespaces/default/omf"

#define AUTHORIZATION_URL_ADH "REGION_PLACEHOLDER.datahub.connect.aveva.com"
#define AUTHORIZATION_URL_OCS "REGION_PLACEHOLDER.osisoft.com:443"

// Default ports for each supported endpoint type
enum OMF_ENDPOINT_PORT {
	ENDPOINT_PORT_PIWEB_API=443,
	ENDPOINT_PORT_CR=5460,
	ENDPOINT_PORT_OCS=443,
	ENDPOINT_PORT_EDS=5590,
	ENDPOINT_PORT_ADH=443
};

/**
 * Plugin specific default configuration
 */
#define NOT_BLOCKING_ERRORS_DEFAULT QUOTE( \
	{ \
		"errors400" : [ \
			"Redefinition of the type with the same ID is not allowed", \
			"Invalid value type for the property", \
			"Property does not exist in the type definition", \
			"Container is not defined", \
			"Unable to find the property of the container of type" \
		] \
	} \
)

#define NOT_BLOCKING_ERRORS_DEFAULT_PI_WEB_API QUOTE( \
	{ \
		"EventInfo" : [ \
			"The specified value is outside the allowable range" \
		] \
	} \
)

#define AF_HIERARCHY_RULES QUOTE( \
	{ \
	} \
)

/**
 * A class that holds the configuration information for the OMF plugin.
 *
 * Note this is the first stage of refactoring the OMF plugins and represents
 * the CONNECTOR_INFO structure of original plugin as a class
 */
class OMFInformation {
	public:
		OMFInformation(ConfigCategory* configData);
		~OMFInformation();
		// Restore persisted state (sent data types) and prepare for sending
		void		start(const std::string& storedData);
		// Send a block of readings; returns number successfully processed
		uint32_t	send(const vector<Reading *>& readings);
		// Serialise state that must persist between plugin invocations
		std::string	saveData();
	private:
		void		loadSentDataTypes(rapidjson::Document& JSONData);
		long		getMaxTypeId();
		int		PIWebAPIGetVersion(bool logMessage = true);
		int		EDSGetVersion(bool logMessage = true);
		int		IsADHConnected(bool logMessage = true);
		void		SetOMFVersion();
		void		CheckDataActionCode();
		OMF_ENDPOINT	identifyPIServerEndpoint();
		std::string	saveSentDataTypes();
		unsigned long	calcTypeShort(const std::string& dataTypes);
		void		ParseProductVersion(std::string &versionString, int *major, int *minor);
		std::string	ParseEDSProductInformation(std::string json);
		std::string	AuthBasicCredentialsGenerate(std::string& userId, std::string& password);
		void		AuthKerberosSetup(std::string& keytabEnv, std::string& keytabFileName);
		double		GetElapsedTime(struct timeval *startTime);
		bool		IsDataArchiveConnected();
		void		handleOMFTracing();

	private:
		Logger		*m_logger;
		HttpSender	*m_sender;		// HTTPS connection
		OMF		*m_omf;			// OMF data protocol
		OCS		*m_ocs;			// ADH and OCS authorization
		bool		m_sendFullStructure;	// It sends the minimum OMF structural messages to load data into PI Data Archive if disabled
		bool		m_compression;		// whether to compress readings' data
		string		m_protocol;		// http / https
		string		m_hostAndPort;		// hostname:port for SimpleHttps
		unsigned int	m_retrySleepTime;	// Seconds between each retry
		unsigned int	m_maxRetry;		// Max number of retries in the communication
		unsigned int	m_timeout;		// connect and operation timeout
		string		m_path;			// PI Server application path
		string		m_delimiter;		// delimiter between Asset and Datapoint in PI data stream names
		string		m_dataActionCode;	// Action code to use for OMF Data posts: update or create
		long		m_typeId;		// OMF protocol type-id prefix
		string		m_producerToken;	// PI Server connector token
		string		m_formatNumber;		// OMF protocol Number format
		string		m_formatInteger;	// OMF protocol Integer format
		OMF_ENDPOINT	m_PIServerEndpoint;	// Defines which End point should be used for the communication
		NAMINGSCHEME_ENDPOINT
				m_NamingScheme;		// Define how the object names should be generated - https://fledge-iot.readthedocs.io/en/latest/OMF.html#naming-scheme
		string		m_DefaultAFLocation;	// 1st hierarchy in Asset Framework, PI Web API only.
		string		m_AFMap;		// Defines a set of rules to address where assets should be placed in the AF hierarchy.
							// https://fledge-iot.readthedocs.io/en/latest/OMF.html#asset-framework-hierarchy-rules
		string		m_prefixAFAsset;	// Prefix to generate unique asset id
		string		m_PIWebAPIProductTitle;
		string		m_RestServerVersion;
		string		m_PIWebAPIAuthMethod;	// Authentication method to be used with the PI Web API.
		string		m_PIWebAPICredentials;	// Credentials is the base64 encoding of id and password joined by a single colon (:)
		string		m_KerberosKeytab;	// Kerberos authentication keytab file
							// stores the environment variable value about the keytab file path
							// to allow the environment to persist for all the execution of the plugin
							//
							// Note : A keytab is a file containing pairs of Kerberos principals
							// and encrypted keys (which are derived from the Kerberos password).
							// You can use a keytab file to authenticate to various remote systems
							// using Kerberos without entering a password.
		string		m_OCSNamespace;		// ADH & OCS configurations
		string		m_OCSTenantId;
		string		m_OCSClientId;
		string		m_OCSClientSecret;
		string		m_OCSToken;
		string		m_authUrl;
		vector<pair<string, string>>
				m_staticData;		// Static data
		// Errors considered not blocking in the communication with the PI Server
		std::vector<std::string>
				m_notBlockingErrors;
		// Per asset DataTypes
		std::map<std::string, OMFDataTypes>
				m_assetsDataTypes;
		string		m_omfversion;
		bool		m_legacy;
		string		m_name;
		bool		m_connected;
		bool		m_tracingEnabled;
		std::size_t	m_numBlocks;
};
#endif

================================================
FILE: C/plugins/north/OMF/include/omflinkeddata.h
================================================
#ifndef OMFLINKEDDATA_H
#define OMFLINKEDDATA_H
/*
 * Fledge OSIsoft OMF interface to PI Server.
 *
 * Copyright (c) 2022-2025 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <map>
#include <set>
#include <reading.h>
#include <OMFHint.h>
#include <omfbuffer.h>
#include <linkedlookup.h>
#include <omferror.h>

/**
 * The OMFLinkedData class.
 * A reading is formatted with OMF specifications using the linked
 * type creation scheme supported for OMF Version 1.2 onwards.
 *
 * This is based on the new mechanism discussed at AVEVA World 2022 and
 * the mechanism is detailed in the Google Doc,
 * https://docs.google.com/document/d/1w0e7VRqX7xzc0lEBLq-sYhgaHE0ABasOa6EC9dJMrMs/edit
 *
 * The principle is to use links to containers in OMF with each container being a single
 * data point in the asset. There are no specific types for the assets, they share a set
 * of base types via these links. This should allow for readings that have different sets
 * of datapoints for each asset.
 *
 * It is also a goal of this mechanism to move away from the need to persist state data
 * between invocations and make the process more robust.
 */
class OMFLinkedData
{
	public:
		OMFLinkedData(std::unordered_map<std::string, LALookup> *linkedAssetState,
			      const OMF_ENDPOINT PIServerEndpoint = ENDPOINT_CR) :
					m_linkedAssetState(linkedAssetState),
					m_endpoint(PIServerEndpoint),
					m_doubleFormat("float64"),
					m_integerFormat("int64")
		{
		};
		// Format one reading into the payload buffer; returns true if data was added
		bool		processReading(OMFBuffer& payload,
					       bool needDelim,
					       const Reading& reading,
					       const std::string& DefaultAFLocation = std::string(),
					       OMFHints *hints = NULL);
		// Create lookup entries for the block of readings if not already present
		void		buildLookup(const std::vector<Reading *>& reading);
		void		setSendFullStructure(const bool sendFullStructure) {m_sendFullStructure = sendFullStructure;};
		// Send the accumulated container definitions to the endpoint
		bool		flushContainers(HttpSender& sender,
						const std::string& path,
						std::vector<std::pair<std::string, std::string> >& header,
						OMFError& error,
						bool *isConnected);
		std::size_t	clearLALookup(const std::vector<Reading *>& reading,
					      std::size_t startIndex,
					      std::size_t numReadings,
					      std::string &delimiter);
		void		setDelimiter(const std::string &delimiter) {m_delimiter = delimiter;};
		void		setFormats(const std::string& doubleFormat, const std::string& integerFormat)
				{
					m_doubleFormat = doubleFormat;
					m_integerFormat = integerFormat;
				};
		void		setStaticData(std::vector<std::pair<std::string, std::string>> *staticData) { m_staticData = staticData; };
	private:
		std::string	getBaseType(Datapoint *dp, const std::string& format);
		void		sendContainer(std::string& link, Datapoint *dp, OMFHints * hints, const std::string& baseType);
		// Only float, integer and string datapoints can be sent via linked types
		bool		isTypeSupported(DatapointValue& dataPoint)
				{
					switch (dataPoint.getType())
					{
						case DatapointValue::DatapointTag::T_FLOAT:
						case DatapointValue::DatapointTag::T_INTEGER:
						case DatapointValue::DatapointTag::T_STRING:
							return true;
						default:
							return false;
					}
				};

	private:
		bool		m_sendFullStructure;
		std::string	m_delimiter;

		/**
		 * The container for this asset and data point has been sent in
		 * this session. The key is the asset followed by the datapoint name
		 * with a delimiter (default: '.') in between. The value is the base type used, a
		 * container will be sent if the base type changes.
		 */
		std::unordered_map<std::string, LALookup>
				*m_linkedAssetState;

		/**
		 * The endpoint to which we are sending data
		 */
		OMF_ENDPOINT	m_endpoint;

		/**
		 * Static data to send to OMF
		 */
		std::vector<std::pair<std::string, std::string>>
				*m_staticData;

		/**
		 * The set of containers to flush
		 */
		std::string	m_containers;
		std::set<std::string>
				m_containerNames;
		std::string	m_doubleFormat;
		std::string	m_integerFormat;
};
#endif

================================================
FILE: C/plugins/north/OMF/linkdata.cpp
================================================
/*
 * Fledge OSIsoft OMF interface to PI Server.
 *
 * Copyright (c) 2022-2025 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <utility>
#include <iostream>
#include <string>
#include <cstring>
#include <omf.h>
#include <piwebapi.h>
#include <OMFHint.h>
#include <logger.h>
#include "string_utils.h"
#include <datapoint.h>
#include <iterator>
#include <typeinfo>
#include <algorithm>
#include <omflinkeddata.h>
#include <omferror.h>

using namespace std;

/**
 * In order to cut down on the number of string copies made whilst building
 * the OMF message for a reading we reserve a number of bytes in a string and
 * each time we get close to filling the string we reserve more.
The value below * defines the increment we use to grow the string reservation. */ #define RESERVE_INCREMENT 100 /** * Create a comma-separated string from a string set * * @param stringSet Set of strings * @return Set members as a comma-separated string */ static std::string StringSetToCSVString(const std::set<std::string> &stringSet) { std::string stringSetMembers; for (std::string item : stringSet) { stringSetMembers.append(item).append(","); } if (stringSetMembers.size() > 0) { stringSetMembers.resize(stringSetMembers.size() - 1); // remove trailing comma } return stringSetMembers; } /** * Convert a DatapointValue to a string suitable for an OMF Data message * * @param dp Datapoint * @param format OMF data type format to be used for the string * @return Value string for the OMF Data message */ static std::string DatapointValueToOMFString(Datapoint *dp, std::string &format) { // Coerce floating point numbers to integers if requested. // OMF will not accept floating point numbers sent to integer Containers so they must be coerced. // OMF will accept integers sent to floating point Containers so no need to explicitly coerce. // When coercing negative floating point numbers to unsigned integers, set the OMF value to 'null'. 
std::string omfValueString; if (dp->getData().getType() == DatapointValue::T_FLOAT) { double doubleValue = dp->getData().toDouble(); if (format.compare(0, 6, "Double") == 0) { omfValueString = dp->getData().toString(); // very common; check this first } else if (format.compare(0, 7, "Integer") == 0) { omfValueString = std::to_string((long)doubleValue); } else if (format.compare(0, 8, "UInteger") == 0) { if (doubleValue < 0.0) { omfValueString = std::string("null"); } else { omfValueString = std::to_string((long)doubleValue); } } else { omfValueString = dp->getData().toString(); } } else if ((dp->getData().getType() == DatapointValue::T_INTEGER) && (format.compare(0, 8, "UInteger") == 0)) { if (dp->getData().toInt() < 0) { omfValueString = std::string("null"); } else { omfValueString = dp->getData().toString(); } } else { omfValueString = dp->getData().toString(); } return omfValueString; } /** * OMFLinkedData constructor, generates the OMF message containing the data * * @param payload The buffer into which to populate the payload * @param delim Add a delimiter before outputting anything * @param reading Reading for which the OMF message must be generated * @param AFHierarchyPrefix Unused at the current stage * @param hints OMF hints for the specific reading for changing the behaviour of the operation * */ bool OMFLinkedData::processReading(OMFBuffer& payload, bool delim, const Reading& reading, const string& AFHierarchyPrefix, OMFHints *hints) { bool rval = false; bool changed; string assetName = reading.getAssetName(); string originalAssetName = OMF::ApplyPIServerNamingRulesObj(assetName, NULL); // Apply any TagName hints to modify the containerid if (hints) { const std::vector<OMFHint *> omfHints = hints->getHints(); for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { if (typeid(**it) == typeid(OMFTagNameHint)) { string hintValue = (*it)->getHint(); Logger::getLogger()->debug("Using OMF TagName hint: %s for asset %s", hintValue.c_str(), 
assetName.c_str()); assetName = hintValue; } if (typeid(**it) == typeid(OMFTagHint)) { string hintValue = (*it)->getHint(); Logger::getLogger()->debug("Using OMF Tag hint: %s for asset %s", hintValue.c_str(), assetName.c_str()); assetName = hintValue; } } } // Get reading data const vector<Datapoint*> data = reading.getReadingData(); vector<string> skippedDatapoints; Logger::getLogger()->debug("Processing %s (%s) using Linked Types", assetName.c_str(), DataPointNamesAsString(reading).c_str()); assetName = OMF::ApplyPIServerNamingRulesObj(assetName, NULL); bool needDelim = delim; auto assetLookup = m_linkedAssetState->find(originalAssetName + m_delimiter); if (assetLookup == m_linkedAssetState->end()) { // Panic Asset lookup not created Logger::getLogger()->error("Internal error: No asset lookup item for %s.", assetName.c_str()); return ""; } if (m_sendFullStructure && assetLookup->second.assetState(assetName) == false) { if (needDelim) payload.append(','); // Send the data message to create the asset instance payload.append("{ \"typeid\":\"FledgeAsset\", \"values\":[ { \"AssetId\":\""); payload.append(assetName + "\",\"Name\":\""); payload.append(assetName + "\""); for (std::pair<std::string, std::string> &sData : *m_staticData) { payload.append(",\""); payload.append(sData.first); payload.append("\":\""); payload.append(sData.second); payload.append('\"'); } payload.append("} ] }"); rval = true; needDelim = true; assetLookup->second.assetSent(assetName); } /** * This loop creates the data values for each of the datapoints in the * reading. 
*/ for (vector<Datapoint*>::const_iterator it = data.begin(); it != data.end(); ++it) { Datapoint *dp = *it; string dpName = dp->getName(); if (dpName.compare(OMF_HINT) == 0) { // Don't send the OMF Hint to the PI Server continue; } dpName = OMF::ApplyPIServerNamingRulesObj(dpName, NULL); if (!isTypeSupported(dp->getData())) { skippedDatapoints.push_back(dpName); continue; } else { string format; string tagNameHintRaw, tagNameHint; bool tagNameHintchanged = false; if (hints) { const vector<OMFHint *> omfHints = hints->getHints(dpName); for (auto hit = omfHints.cbegin(); hit != omfHints.cend(); hit++) { if (typeid(**hit) == typeid(OMFNumberHint)) { format = (*hit)->getHint(); break; } if (typeid(**hit) == typeid(OMFIntegerHint)) { format = (*hit)->getHint(); break; } if (typeid(**hit) == typeid(OMFTagNameDatapointHint)) { tagNameHintRaw = (*hit)->getHint(); tagNameHint = OMF::ApplyPIServerNamingRulesObj(tagNameHintRaw, &tagNameHintchanged); break; } } } // Create the link for the asset if not already created string link = tagNameHint.empty() ? assetName + m_delimiter + dpName : tagNameHint; string dpLookupName = originalAssetName + m_delimiter + dpName; auto dpLookup = m_linkedAssetState->find(dpLookupName); string baseType = getBaseType(dp, format); if (baseType.empty()) { // Skip the datapoint. // Data type is not supported or the OMFHint is incorrect. // If 'format' is non-empty, a numeric or integer OMFHint was applied. The format string must be invalid. 
if (format.empty()) { skippedDatapoints.push_back(dpName); } else { skippedDatapoints.push_back(dpName + "[" + format + "]"); } continue; } if (needDelim) { payload.append(','); } else { needDelim = true; } if (dpLookup == m_linkedAssetState->end()) { Logger::getLogger()->error("Trying to send a link for a datapoint for which we have not created a base type"); } else if (dpLookup->second.containerState(assetName) == false) { sendContainer(link, dp, hints, baseType); dpLookup->second.containerSent(assetName, baseType); if (tagNameHintchanged) { Logger::getLogger()->warn("Datapoint %s.%s tagName Hint %s is not a valid PI name. Changed to %s", assetName.c_str(), dpName.c_str(), tagNameHintRaw.c_str(), tagNameHint.c_str()); } } else if (baseType.compare(dpLookup->second.getBaseTypeString()) != 0) { // Land here if the integer or number OMFHint is different from the hint in place // when the Container was first created or confirmed in the current run. // The only way to store data in this case is to reset the format to its initial value. // Attempting to apply the requested format will cause data to be discarded because the // requested format is not defined for the Container. string containerFormat = dpLookup->second.getBaseTypeString(); Logger::getLogger()->debug("%s: Requested format '%s' does not match the Container format '%s'. 
Resetting to '%s'.", link.c_str(), baseType.c_str(), containerFormat.c_str(), containerFormat.c_str()); baseType = containerFormat; } if (m_sendFullStructure && dpLookup->second.linkState(assetName) == false) { payload.append("{ \"typeid\":\"__Link\","); payload.append("\"values\":[ { \"source\" : {"); payload.append("\"typeid\": \"FledgeAsset\","); payload.append("\"index\":\"" + assetName); payload.append("\" }, \"target\" : {"); payload.append("\"containerid\" : \""); payload.append(link); payload.append("\" } } ] },"); rval = true; dpLookup->second.linkSent(assetName); } // Convert reading data into the OMF JSON string payload.append("{\"containerid\": \"" + link); payload.append("\", \"values\": [{"); // Base type we are using for this data point payload.append("\"" + baseType + "\": "); // Add datapoint Value as a string to the payload. payload.append(DatapointValueToOMFString(dp, baseType)); payload.append(", "); // Append Z to getAssetDateTime(FMT_STANDARD) payload.append("\"Time\": \"" + reading.getAssetDateUserTime(Reading::FMT_STANDARD) + "Z" + "\""); payload.append("} ] }"); rval = true; } } if (skippedDatapoints.size() > 0) { string points; for (string& dp : skippedDatapoints) { if (!points.empty()) points.append(", "); points.append(dp); } auto pos = points.find_last_of(","); if (pos != string::npos) { points.replace(pos, 1, " and"); } string assetName = reading.getAssetName(); string msg = "The asset " + assetName + " had a number of datapoints (" + points + ") that are not supported by OMF and have been omitted"; OMF::reportAsset(assetName, "warn", msg); } return rval; } /** * If the entries are needed in the lookup table for this block of readings then create them * * @param readings A block of readings to process */ void OMFLinkedData::buildLookup(const vector<Reading *>& readings) { for (const Reading *reading : readings) { string assetName = reading->getAssetName(); assetName = OMF::ApplyPIServerNamingRulesObj(assetName, NULL); // Apply any 
TagName hints to modify the containerid LALookup empty; string assetKey = assetName + m_delimiter; if (m_linkedAssetState->count(assetKey) == 0) m_linkedAssetState->insert(pair<string, LALookup>(assetKey, empty)); // Get reading data const vector<Datapoint*> data = reading->getReadingData(); /** * This loop creates the data values for each of the datapoints in the * reading. */ for (vector<Datapoint*>::const_iterator it = data.begin(); it != data.end(); ++it) { Datapoint *dp = *it; string dpName = dp->getName(); if (dpName.compare(OMF_HINT) == 0) { // Don't send the OMF Hint to the PI Server continue; } dpName = OMF::ApplyPIServerNamingRulesObj(dpName, NULL); if (!isTypeSupported(dp->getData())) { continue; } string link = assetName + m_delimiter + dpName; if (m_linkedAssetState->count(link) == 0) m_linkedAssetState->insert(pair<string, LALookup>(link, empty)); } } } /** * Calculate the base type we need to link the container * * @param dp The datapoint to process * @param format The format to use based on a hint, this may be empty * @return The base type linked in the container */ string OMFLinkedData::getBaseType(Datapoint *dp, const string& format) { string baseType; switch (dp->getData().getType()) { case DatapointValue::T_STRING: baseType = "String"; break; case DatapointValue::T_INTEGER: { string intFormat; if (!format.empty()) intFormat = format; else intFormat = m_integerFormat; if (intFormat.compare("int64") == 0) baseType = "Integer64"; else if (intFormat.compare("int32") == 0) baseType = "Integer32"; else if (intFormat.compare("int16") == 0) baseType = "Integer16"; else if (intFormat.compare("uint64") == 0) baseType = "UInteger64"; else if (intFormat.compare("uint32") == 0) baseType = "UInteger32"; else if (intFormat.compare("uint16") == 0) baseType = "UInteger16"; else if (intFormat.compare("float64") == 0) baseType = "Double64"; else if (intFormat.compare("float32") == 0) baseType = "Double32"; break; } case DatapointValue::T_FLOAT: { string 
doubleFormat; if (!format.empty()) doubleFormat = format; else doubleFormat = m_doubleFormat; if (doubleFormat.compare("float64") == 0) baseType = "Double64"; else if (doubleFormat.compare("float32") == 0) baseType = "Double32"; else if (doubleFormat.compare("int64") == 0) baseType = "Integer64"; else if (doubleFormat.compare("int32") == 0) baseType = "Integer32"; else if (doubleFormat.compare("int16") == 0) baseType = "Integer16"; else if (doubleFormat.compare("uint64") == 0) baseType = "UInteger64"; else if (doubleFormat.compare("uint32") == 0) baseType = "UInteger32"; else if (doubleFormat.compare("uint16") == 0) baseType = "UInteger16"; break; } default: Logger::getLogger()->error("Unsupported type %s for the data point %s", dp->getData().getTypeStr().c_str(), dp->getName().c_str()); break; } return baseType; } /** * Create a container message for the linked datapoint * * @param linkName The name to use for the container * @param dp The datapoint to process * @param hints Hints related to this asset * @param baseType The baseType we will use */ void OMFLinkedData::sendContainer(string& linkName, Datapoint *dp, OMFHints * hints, const string& baseType) { string dataSource = "Fledge"; string uom, minimum, maximum, interpolation; bool propertyOverrides = false; if (hints) { const vector<OMFHint *> omfHints = hints->getHints(); for (auto it = omfHints.cbegin(); it != omfHints.end(); it++) { if (typeid(**it) == typeid(OMFSourceHint)) { dataSource = (*it)->getHint(); } } const vector<OMFHint *> dpHints = hints->getHints(dp->getName()); for (auto it = dpHints.cbegin(); it != dpHints.end(); it++) { if (typeid(**it) == typeid(OMFSourceHint)) { dataSource = (*it)->getHint(); } if (typeid(**it) == typeid(OMFUOMHint)) { uom = (*it)->getHint(); propertyOverrides = true; } if (typeid(**it) == typeid(OMFMinimumHint)) { minimum = (*it)->getHint(); propertyOverrides = true; } if (typeid(**it) == typeid(OMFMaximumHint)) { maximum = (*it)->getHint(); propertyOverrides = true; } 
if (typeid(**it) == typeid(OMFInterpolationHint)) { interpolation = (*it)->getHint(); propertyOverrides = true; } } } string container = "{ \"id\" : \"" + linkName; container += "\", \"typeid\" : \""; container += baseType; container += "\", \"name\" : \""; string dpName = OMF::ApplyPIServerNamingRulesObj(dp->getName(), NULL); container += dpName; container += "\", \"datasource\" : \"" + dataSource + "\""; if (propertyOverrides) { container += ", \"propertyoverrides\" : { \""; container += baseType; container += "\" : {"; string delim = ""; if (!uom.empty()) { delim = ","; container += "\"uom\" : \""; container += uom; container += "\""; } if (!minimum.empty()) { container += delim; delim = ","; container += "\"minimum\" : "; container += minimum; } if (!maximum.empty()) { container += delim; delim = ","; container += "\"maximum\" : "; container += maximum; } if (!interpolation.empty()) { container += delim; delim = ","; container += "\"interpolation\" : \""; container += interpolation; container += "\""; } container += "} }"; } container += "}"; m_containerNames.insert(linkName); Logger::getLogger()->debug("Built container: %s", container.c_str()); if (! 
/**
 * Flush the container definitions that have been built up by sendContainer().
 * The buffered definitions are wrapped in a JSON array and POSTed in a single
 * request; the buffer is cleared before the send so a failed attempt is not
 * retried with stale data.
 *
 * @param sender	HTTP client
 * @param path		REST server URL
 * @param header	REST call headers
 * @param error		OMFError object with parsed PI Web API HTTP response
 * @param isConnected	Set to false if REST call shows loss of connection to PI
 * @return		true if the containers were successfully flushed
 */
bool OMFLinkedData::flushContainers(HttpSender& sender, const string& path, vector<pair<string, string> >& header, OMFError& error, bool *isConnected)
{
	if (m_containers.empty())
		return true;	// Nothing to flush
	string payload = "[" + m_containers + "]";
	m_containers = "";	// Clear the buffer before sending

	Logger::getLogger()->debug("Flush container information: %s", payload.c_str());

	// Write to OMF endpoint
	try
	{
		int res = sender.sendRequest("POST", path, header, payload);
		if ( ! (res >= 200 && res <= 299) )
		{
			// Non-2xx result that did not raise an exception
			Logger::getLogger()->error("An error occurred sending the container data. HTTP code %d - %s %s",
					res, sender.getHostPort().c_str(), sender.getHTTPResponse().c_str());
			if (!m_containerNames.empty())
			{
				Logger::getLogger()->warn("Containers attempted: %s", StringSetToCSVString(m_containerNames).c_str());
			}
			return false;
		}
		else if (res == 201)
		{
			Logger::getLogger()->info("Containers created: %s", StringSetToCSVString(m_containerNames).c_str());
		}
		else
		{
			Logger::getLogger()->info("Containers confirmed: %s", StringSetToCSVString(m_containerNames).c_str());
		}
	}
	// Exception raised for HTTP 400 Bad Request
	catch (const BadRequest& e)
	{
		// Prefer the structured error parsed from the HTTP response; fall
		// back to the raw exception text if no messages were parsed
		error.setFromHttpResponse(sender.getHTTPResponse());
		if (error.Log("The OMF endpoint reported a Bad Request when sending Containers") == false)
		{
			Logger::getLogger()->error("HTTP 400: Bad Request when sending Containers. Exception: %s", e.what());
		}
		if (!m_containerNames.empty())
		{
			Logger::getLogger()->warn("Containers attempted: %s", StringSetToCSVString(m_containerNames).c_str());
		}
		return false;
	}
	// Exception raised for HTTP 409 Conflict
	catch (const Conflict& e)
	{
		error.setFromHttpResponse(sender.getHTTPResponse());
		if (error.Log("The OMF endpoint reported a Conflict when sending Containers") == false)
		{
			Logger::getLogger()->error("HTTP 409: Conflict when sending Containers. Exception: %s", e.what());
		}
		if (!m_containerNames.empty())
		{
			Logger::getLogger()->warn("Containers attempted: %s", StringSetToCSVString(m_containerNames).c_str());
		}
		return false;
	}
	// Any other exception from the HTTP layer
	catch (const std::exception &e)
	{
		error.setFromHttpResponse(sender.getHTTPResponse());
		if (error.hasMessages())
		{
			error.Log("An exception occurred when sending container information to the OMF endpoint");
			if (error.getHttpCode() == 503)
			{
				// Service unavailable: mark the connection as lost
				*isConnected = false;
				Logger::getLogger()->warn("HTTP 503: REST service unavailable");
			}
		}
		else
		{
			PIWebAPI piwebapi;
			std::string errorMessage = piwebapi.errorMessageHandler(e.what());
			Logger::getLogger()->error("An exception occurred when sending container information to the OMF endpoint, %s - %s %s",
					errorMessage.c_str(), sender.getHostPort().c_str(), path.c_str());
		}
		if (!m_containerNames.empty())
		{
			Logger::getLogger()->warn("Containers attempted: %s", StringSetToCSVString(m_containerNames).c_str());
		}

		// Check for any error messages that indicate a loss of connection;
		// noConnectionErrorMessages is a NUL-string-terminated table of
		// known message prefixes
		int i = 0;
		while (strlen(noConnectionErrorMessages[i]))
		{
			if (0 == strncmp(e.what(), noConnectionErrorMessages[i], strlen(noConnectionErrorMessages[i])))
			{
				*isConnected = false;
				Logger::getLogger()->warn("Connection to the destination data archive has been lost");
				break;
			}
			i++;
		}
		return false;
	}
	return true;
}
entries cleared */ std::size_t OMFLinkedData::clearLALookup(const std::vector<Reading *> &readings, std::size_t startIndex, std::size_t numReadings, std::string &delimiter) { std::size_t numCleared = 0; LALookup empty; for (std::size_t i = startIndex; (i < numReadings) && (i < readings.size()); i++) { Reading *reading = readings[i]; std::string assetNameDelim = OMF::ApplyPIServerNamingRulesObj(reading->getAssetName(), NULL) + delimiter; // Check if the asset key is present in the linked asset state map auto assetIterator = m_linkedAssetState->find(assetNameDelim); if (assetIterator != m_linkedAssetState->end()) { assetIterator->second = empty; numCleared++; } // Check if datapoint keys are present in the linked asset state map for (Datapoint *datapoint : reading->getReadingData()) { std::string dpName = OMF::ApplyPIServerNamingRulesObj(datapoint->getName(), NULL); auto datappointIterator = m_linkedAssetState->find(assetNameDelim + dpName); if (datappointIterator != m_linkedAssetState->end()) { datappointIterator->second = empty; numCleared++; } } } return numCleared; } /** * Set the base type by passing the string of the base type */ void LALookup::setBaseType(const string& baseType) { if (baseType.compare("Double64") == 0) m_baseType = OMFBT_DOUBLE64; else if (baseType.compare("Double32") == 0) m_baseType = OMFBT_DOUBLE32; else if (baseType.compare("Integer16") == 0) m_baseType = OMFBT_INTEGER16; else if (baseType.compare("Integer32") == 0) m_baseType = OMFBT_INTEGER32; else if (baseType.compare("Integer64") == 0) m_baseType = OMFBT_INTEGER64; else if (baseType.compare("UInteger16") == 0) m_baseType = OMFBT_UINTEGER16; else if (baseType.compare("UInteger32") == 0) m_baseType = OMFBT_UINTEGER32; else if (baseType.compare("UInteger64") == 0) m_baseType = OMFBT_UINTEGER64; else if (baseType.compare("String") == 0) m_baseType = OMFBT_STRING; else if (baseType.compare("FledgeAsset") == 0) m_baseType = OMFBT_FLEDGEASSET; else Logger::getLogger()->fatal("Unable to map 
base type '%s'", baseType.c_str()); } /** * The container has been sent with the specific base type * * @param tagName The name of the tag we are using * @param baseType The baseType we resolve to */ void LALookup::containerSent(const std::string& tagName, OMFBaseType baseType) { if (m_tagName.compare(tagName)) { // Force a new Link and AF Link to be sent for the new tag name m_sentState &= ~(LAL_LINK_SENT | LAL_AFLINK_SENT); } m_baseType = baseType; m_tagName = tagName; m_sentState |= LAL_CONTAINER_SENT; } /** * The container has been sent with the specific base type * * @param tagName The name of the tag we are using * @param baseType The baseType we resolve to */ void LALookup::containerSent(const std::string& tagName, const std::string& baseType) { setBaseType(baseType); if (m_tagName.compare(tagName)) { // Force a new Link and AF Link to be sent for the new tag name m_sentState &= ~(LAL_LINK_SENT | LAL_AFLINK_SENT); } m_tagName = tagName; m_sentState |= LAL_CONTAINER_SENT; } /** * Get a string representation of the base type that was sent */ string LALookup::getBaseTypeString() { switch (m_baseType) { case OMFBT_UNKNOWN: return "Unknown"; case OMFBT_DOUBLE64: return "Double64"; case OMFBT_DOUBLE32: return "Double32"; case OMFBT_INTEGER16: return "Integer16"; case OMFBT_INTEGER32: return "Integer32"; case OMFBT_INTEGER64: return "Integer64"; case OMFBT_UINTEGER16: return "UInteger16"; case OMFBT_UINTEGER32: return "UInteger32"; case OMFBT_UINTEGER64: return "UInteger64"; case OMFBT_STRING: return "String"; default: return "Unknown"; } } ================================================ FILE: C/plugins/north/OMF/ocs.cpp ================================================ /* * Fledge OSIsoft ADH and OCS integration. 
* Implements the integration for the specific functionalities exposed by ADH and OCS * * Copyright (c) 2020-2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ #include <string> #include <vector> #include <utility> #include <ocs.h> #include <string_utils.h> #include <logger.h> #include <simple_https.h> #include <rapidjson/document.h> #include "rapidjson/error/en.h" using namespace std; using namespace rapidjson; OCS::OCS(const std::string &authorizationUrl) : m_authUrl(authorizationUrl), m_nextAuthentication(std::chrono::steady_clock::time_point()) { } // Destructor OCS::~OCS() { } /** * Extracts the OCS token from the JSON returned by the OCS API * * @param response JSON message generated by the OCS API containing the OCS token * */ void OCS::extractToken(const string &response) { Document JSon; ParseResult ok = JSon.Parse(response.c_str()); if (!ok) { Logger::getLogger()->error("OCS token extract, invalid json - HTTP response :%s:", response.c_str()); } else { if (JSon.HasMember("access_token")) { m_token = JSon["access_token"].GetString(); } if (JSon.HasMember("expires_in")) { m_expiresIn = JSon["expires_in"].GetUint(); Logger::getLogger()->debug("ADH token expires in %u seconds", m_expiresIn); } else { m_expiresIn = 0; Logger::getLogger()->warn("ADH authentication response does not include 'expires_in'"); } } } /** * Calls the OCS/ADH API to retrieve the authentication token related to the the clientId and clientSecret * * @param clientId Client Id code assigned by OCS/ADH using its GUI to the specific connection * @param clientSecret Client Secret code assigned by OCS/ADH using its GUI to the specific connection * @param logMessage If true, log error messages (default: true) * @return HTTP Code * */ int OCS::retrieveToken(const string& clientId, const string& clientSecret, bool logMessage) { string response; string payload; HttpSender *endPoint; vector<pair<string, string>> header; int httpCode = 400; endPoint = new 
SimpleHttps(m_authUrl, TIMEOUT_CONNECT, TIMEOUT_REQUEST, RETRY_SLEEP_TIME, 0); header.push_back( std::make_pair("Content-Type", "application/x-www-form-urlencoded")); header.push_back( std::make_pair("Accept", " text/plain")); payload = PAYLOAD_RETRIEVE_TOKEN; StringReplace(payload, "CLIENT_ID_PLACEHOLDER", urlEncode(clientId)); StringReplace(payload, "CLIENT_SECRET_ID_PLACEHOLDER", urlEncode(clientSecret)); // Anonymous auth string authMethod = "a"; endPoint->setAuthMethod (authMethod); try { httpCode = endPoint->sendRequest("POST", URL_RETRIEVE_TOKEN, header, payload); response = endPoint->getHTTPResponse(); if (httpCode >= 200 && httpCode <= 399) { extractToken(response); Logger::getLogger()->debug("ADH authentication token of %u characters retrieved", m_token.size()); } else if (logMessage) { Logger::getLogger()->error("Error in retrieving the authentication token from ADH - http :%d: :%s: ", httpCode, response.c_str()); } } catch (const Unauthorized &e) { // Log authentication failures regardless of 'logMessage' Logger::getLogger()->error("Unable to authenticate with AVEVA Data Hub"); httpCode = 401; } catch (exception &ex) { if (logMessage) { Logger::getLogger()->error("Error in retrieving the authentication token from ADH - error :%s: ", ex.what()); } } delete endPoint; return httpCode; } /** * Calls the ADH API to retrieve the authentication token * * @param logMessage If true, log error messages (default: true) * @return token Authorization token */ string OCS::OCSRetrieveAuthToken(const string &clientId, const string &clientSecret, bool logMessage) { std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now(); if (now >= m_nextAuthentication) { int httpCode = retrieveToken(clientId, clientSecret, logMessage); if (httpCode >= 200 && httpCode <= 399) { // Set the next authentication check time only if this attempt was successful. // Otherwise, leave the next authentication check as-is so retry will be immediate. 
// Authentication check time is half the expiry time to avoid being near the deadline. m_nextAuthentication = now + std::chrono::seconds(m_expiresIn / 2); } return m_token; } else { return m_token; } } ================================================ FILE: C/plugins/north/OMF/omf.cpp ================================================ /* * Fledge OSIsoft OMF interface to PI Server. * * Copyright (c) 2018-2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <utility> #include <iostream> #include <string> #include <cstring> #include <omf.h> #include <OMFHint.h> #include <logger.h> #include <zlib.h> #include <rapidjson/document.h> #include "rapidjson/error/en.h" #include "string_utils.h" #include <plugin_api.h> #include <string_utils.h> #include <datapoint.h> #include <thread> #include <piwebapi.h> #include <algorithm> #include <vector> #include <iterator> #include <basetypes.h> #include <omflinkeddata.h> #include <audit_logger.h> using namespace std; using namespace rapidjson; static bool isTypeSupported(DatapointValue& dataPoint); vector<string> OMF::m_reportedAssets; // 1 enable performance tracking #define INSTRUMENT 0 #define AFHierarchySeparator '/' #define AF_TYPES_SUFFIX "-type" // The asset name is composed by: asset name + AF_TYPES_SUFFIX + incremental id of the type // Handling escapes for AF Hierarchies #define AFH_SLASH "/" #define AFH_SLASH_ESCAPE "@/" #define AFH_SLASH_ESCAPE_TMP "##" #define AFH_ESCAPE_SEQ "@@" #define AFH_ESCAPE_CHAR "@" // Structures to generate and assign the 1st level of AF hierarchy if the end point is PI Web API // _placeholder_ will be replaced with the proper value const char *AF_HIERARCHY_1LEVEL_TYPE = QUOTE( [ { "id": "_placeholder_typeid_", "version": "1.0.0.0", "type": "object", "classification": "static", "properties": { "Name": { "type": "string", "isname": true }, "AssetId": { "type": "string", "isindex": true } } } ] ); // Structures to generate the OMF message for 
/**
 * Parse "index" and "containerid" values from JSON containing link "source" and "target".
 * Each complete source/target pair found is appended to 'links'; parse
 * failures are logged and leave 'links' unchanged.
 *
 * @param json	JSON text as char string
 * @param links	Vector of source-target name pairs
 */
static void parseLinkData(const char *json, std::vector<std::pair<std::string, std::string>> &links)
{
	Document doc;
	if (doc.Parse(json).HasParseError())
	{
		Logger::getLogger()->error("parseLinkData error %d: failed to parse %s", (int)doc.GetParseError(), json);
	}
	else if (doc.IsArray())	// top level of the document should be an array
	{
		for (auto &it : doc.GetArray())	// top-level array has one unnamed object
		{
			if (it.IsObject() && it.HasMember("values"))
			{
				// NOTE(review): assumes "values" is an array and that
				// "index"/"containerid" members are strings; rapidjson will
				// assert otherwise — confirm the payloads are always
				// internally generated
				for (auto &it2 : it.GetObject()["values"].GetArray())	// each object in the "values" array has "source" and "target"
				{
					if (it2.IsObject() && it2.HasMember("source") && it2.HasMember("target"))
					{
						auto sourceObject = it2["source"].GetObject();
						if (sourceObject.HasMember("index"))
						{
							std::string sourceString = sourceObject["index"].GetString();
							std::string targetString;
							auto targetObject = it2["target"].GetObject();
							if (targetObject.HasMember("index"))
							{
								targetString = targetObject["index"].GetString();
							}
							else if (targetObject.HasMember("containerid"))
							{
								targetString = targetObject["containerid"].GetString();
							}
							// Only record complete source/target pairs
							if (!sourceString.empty() && !targetString.empty())
							{
								links.push_back(std::make_pair(sourceString, targetString));
							}
						}
					}
				}
			}
		}
	}
}

/**
 * Parse "index" and "containerid" values from JSON containing link "source" and "target".
 * Convenience overload delegating to the char string version.
 *
 * @param json	JSON text as std::string
 * @param links	Vector of source-target name pairs
 */
static void parseLinkData(const std::string &json, std::vector<std::pair<std::string, std::string>> &links)
{
	parseLinkData(json.c_str(), links);
}

/**
 * Parse "Name" from JSON containing a Data message with element definitions
 *
 * @param json	JSON text
 * @return	Name string if present, otherwise an empty string
 */
static std::string parseNameFromJson(const std::string &json)
{
	std::string name;
	Document doc;
	if (doc.Parse(json.c_str()).HasParseError())
	{
		Logger::getLogger()->error("parseNameFromJson error %d: failed to parse %s", (int)doc.GetParseError(), json.c_str());
	}
	else if (doc.IsArray())	// top level of the document should be an array
	{
		for (auto &it : doc.GetArray())	// top-level array has one unnamed object
		{
			if (it.IsObject() && it.HasMember("values"))
			{
				for (auto &it2 : it.GetObject()["values"].GetArray())	// any object in the "values" array has at least "Name" and "AssetId"
				{
					if (it2.IsObject() && it2.HasMember("Name"))
					{
						// If several "Name" members are present the last one wins
						name = it2["Name"].GetString();
					}
				}
			}
		}
	}
	return name;
}
* * @param json JSON text * @param Ids Array of Type or Container ids * @param typeIdPtr TypeId associated with the (single) Id (optional) */ static void parseIdFromJson(const std::string &json, std::vector<std::string> &Ids, std::string *typeIdPtr = NULL) { Document doc; if (doc.Parse(json.c_str()).HasParseError()) { Logger::getLogger()->error("parseIdFromJson error %d: failed to parse %s", (int)doc.GetParseError(), json.c_str()); } else if (doc.IsArray()) // top level of the document should be an array { for (auto &it : doc.GetArray()) // array has unnamed objects, one per type { if (it.IsObject()) { if (it.HasMember("id")) { Ids.push_back(it["id"].GetString()); } if (typeIdPtr && typeIdPtr->empty() && it.HasMember("typeid")) { typeIdPtr->assign(it["typeid"].GetString()); } } } } } /** * Log an array of "ids" with a message after a REST call. * Include the HTTP code in the message if it represents an error. * * @param httpCode HTTP return code from the REST operation. If zero, there is no return code available. * @param message Message used to label the logged ids * @param Ids Array of Id strings */ static void LogIds(const int httpCode, const char *message, std::vector<std::string> &Ids) { if ((httpCode >= 400) && (httpCode < 600)) { for (std::string &Id : Ids) { Logger::getLogger()->error("Error %d %s %s", httpCode, message, Id.c_str()); } } else if (httpCode == 0) { for (std::string &Id : Ids) { Logger::getLogger()->error("%s %s", message, Id.c_str()); } } else { for (std::string &Id : Ids) { Logger::getLogger()->info("%s %s", message, Id.c_str()); } } } /** * Log an array of Links which means source-target pairs. * Include the HTTP code in the message if it represents an error. 
* * @param httpCode HTTP return code from the REST operation * @param message Message used to label the logged links * @param links Array of source-target string pairs */ static void LogLinks(const int httpCode, const char *message, std::vector<std::pair<std::string, std::string>> &links) { if ((httpCode >= 400) && (httpCode < 600)) { for (std::pair<std::string, std::string> &it : links) { Logger::getLogger()->error("Error %d %s %s to %s", httpCode, message, it.first.c_str(), it.second.c_str()); } } else if (httpCode == 0) { for (std::pair<std::string, std::string> &it : links) { Logger::getLogger()->error("Error %s %s to %s", message, it.first.c_str(), it.second.c_str()); } } else { for (std::pair<std::string, std::string> &it : links) { Logger::getLogger()->info("%s %s to %s", message, it.first.c_str(), it.second.c_str()); } } } /** * Extracts an HTTP code from an error message formatted by HttpSender. * The message format is "HTTP code |nnn| HTTP error ..." * * @param msg Msg from which the HTTP code must be extracted * @return HTTP Code from the message. Returns 200 if no HTTP code was found. * */ static int HTTPCodeFromErrorMessage(const string &msg) { string::size_type pos, pos1, pos2; int httpCode = 200; pos = msg.find("HTTP code |"); if (pos != string::npos) { pos1 = msg.find("|", pos); pos2 = msg.find("|", pos1 + 1); if (pos2 != string::npos) { std::string httpCodeString = msg.substr(pos1 + 1, pos2 - pos1 - 1); httpCode = std::stoi(httpCodeString); } } return httpCode; } /** * OMFData constructor, generates the OMF message containing the data * * @param reading Reading for which the OMF message must be generated * @param measurementId Name/Reference of the object of the Data Archive at which the data must be assigned * @param PIServerEndpoint End point for which the OMF message must be prepared among: PIWebAPI, ADH, OCS, EDS... 
/**
 * OMFData constructor, generates the OMF message containing the data
 *
 * @param payload		Buffer into which the OMF Data message JSON is appended
 * @param reading		Reading for which the OMF message must be generated
 * @param measurementId		Name/Reference of the object of the Data Archive at which the data must be assigned
 * @param delim			If true, prepend a comma separator before this message
 * @param PIServerEndpoint	End point for which the OMF message must be prepared among: PIWebAPI, ADH, OCS, EDS...
 * @param AFHierarchyPrefix	Unused at the current stage
 * @param hints			OMF hints for the specific reading for changing the behaviour of the operation
 */
OMFData::OMFData(OMFBuffer& payload, const Reading& reading, string measurementId, bool delim, const OMF_ENDPOINT PIServerEndpoint,const string& AFHierarchyPrefix, OMFHints *hints)
{
	bool changed;	// NOTE(review): never used in this constructor

	Logger::getLogger()->debug("%s - measurementId :%s: ", __FUNCTION__, measurementId.c_str());

	// Apply any TagName/Tag hints to modify the containerid; if both are
	// present the last hint in the list wins
	if (hints)
	{
		const std::vector<OMFHint *> omfHints = hints->getHints();
		for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++)
		{
			if (typeid(**it) == typeid(OMFTagNameHint))
			{
				measurementId = (*it)->getHint();
				Logger::getLogger()->debug("Using OMF TagName hint: %s", measurementId.c_str());
			}
			if (typeid(**it) == typeid(OMFTagHint))
			{
				measurementId = (*it)->getHint();
				Logger::getLogger()->debug("Using OMF Tag hint: %s", measurementId.c_str());
			}
		}
	}

	// Get reading data
	const vector<Datapoint*> data = reading.getReadingData();

	m_hasData = false;

	// Check if there are any datapoints to send; OMF hints and unsupported
	// types are excluded
	for (vector<Datapoint*>::const_iterator it = data.begin(); it != data.end(); ++it)
	{
		string dpName = (*it)->getName();
		if (dpName.compare(OMF_HINT) == 0)
		{
			// Don't send the OMF Hint to the PI Server
			continue;
		}
		if (isTypeSupported((*it)->getData()))
		{
			m_hasData = true;
			break;
		}
	}

	if (m_hasData)
	{
		if (delim)
		{
			payload.append(", ");
		}

		// Convert reading data into the OMF JSON string
		payload.append("{\"containerid\": \"" + measurementId);
		payload.append("\", \"values\": [{");

		/**
		 * This loop appends one "name": value entry per sendable datapoint,
		 * each followed by ", " so the trailing "Time" entry below completes
		 * the object.
		 */
		for (vector<Datapoint*>::const_iterator it = data.begin(); it != data.end(); ++it)
		{
			string dpName = (*it)->getName();
			if (dpName.compare(OMF_HINT) == 0)
			{
				// Don't send the OMF Hint to the PI Server
				continue;
			}
			if (!isTypeSupported((*it)->getData()))
			{
				continue;
			}
			else
			{
				// Add datapoint Name (made PI-safe) and its value
				payload.append("\"" + OMF::ApplyPIServerNamingRulesObj(dpName, nullptr) + "\": " + (*it)->getData().toString());
				payload.append(", ");
			}
		}

		// Append Z to getAssetDateTime(FMT_STANDARD) to mark the time as UTC
		payload.append("\"Time\": \"" + reading.getAssetDateUserTime(Reading::FMT_STANDARD) + "Z" + "\"");

		payload.append("}]}");
	}
}
(*it).second.typeId : TYPE_ID_DEFAULT; m_changeTypeId = false; m_reportedAssets.clear(); } // Destructor OMF::~OMF() { } /** * Compress a string * * @param str Input STL string that is to be compressed * @param compressionlevel zlib/gzip Compression level * @return str gzip compressed binary data */ std::string OMF::compress_string(const std::string& str, int compressionlevel) { const int windowBits = 15; const int GZIP_ENCODING = 16; z_stream zs; // z_stream is zlib's control structure memset(&zs, 0, sizeof(zs)); if (deflateInit2(&zs, compressionlevel, Z_DEFLATED, windowBits | GZIP_ENCODING, 8, Z_DEFAULT_STRATEGY) != Z_OK) throw(std::runtime_error("deflateInit failed while compressing.")); zs.next_in = (Bytef*)str.data(); zs.avail_in = str.size(); // set the z_stream's input int ret; char outbuffer[32768]; std::string outstring; // retrieve the compressed bytes blockwise do { zs.next_out = reinterpret_cast<Bytef*>(outbuffer); zs.avail_out = sizeof(outbuffer); ret = deflate(&zs, Z_FINISH); if (outstring.size() < zs.total_out) { // append the block to the output string outstring.append(outbuffer, zs.total_out - outstring.size()); } } while (ret == Z_OK); deflateEnd(&zs); if (ret != Z_STREAM_END) { // an error occurred that was not EOF std::ostringstream oss; oss << "Exception during zlib compression: (" << ret << ") " << zs.msg; throw(std::runtime_error(oss.str())); } return outstring; } /** * Sends all the data type messages for a Reading data row * * @param row The current Reading data row * @return True if all data types have been sent (HTTP 2xx OK) * False when first error occurs. 
 */
bool OMF::sendDataTypes(const Reading& row, OMFHints *hints)
{
	int res;
	m_changeTypeId = false;

	// Create header for Type
	vector<pair<string, string>> resType = OMF::createMessageHeader("Type");

	// Create data for Type message and parse the types for logging purposes
	string typeData = OMF::createTypeData(row, hints);
	std::vector<std::string> Ids;
	parseIdFromJson(typeData, Ids);

	// If Datatype in Reading row is not supported, just return true
	if (typeData.empty())
	{
		return true;
	}
	else
	{
		// TODO: ADD LOG
	}

	// Build an HTTPS POST with 'resType' headers
	// and 'typeData' JSON payload
	// Then get HTTPS POST ret code and return 0 to client on error
	string assetName = row.getAssetName();
	try
	{
		res = m_sender.sendRequest("POST", m_path, resType, typeData);
		if ( ! (res >= 200 && res <= 299) )
		{
			LogIds(res, "creating Type", Ids);
			string msg = "An error occurred sending the dataType message for the asset " + assetName;
			msg.append(". HTTP error code " + to_string(res));
			reportAsset(assetName, "error", msg);
			return false;
		}
		else
		{
			LogIds(res, (res == 201) ? "Created Type" : "Confirmed Type", Ids);
		}
	}
	// Exception raised for HTTP 400 Bad Request
	catch (const BadRequest& e)
	{
		LogIds(400, "creating Type", Ids);
		string msg = "The OMF endpoint reported a Bad Request when sending Types for the asset " + assetName;
		handleRESTException(e, msg.c_str());
		if (OMF::isDataTypeError(e.what()))
		{
			// Data type error: force type-id change
			m_changeTypeId = true;
			Logger::getLogger()->warn("A data type change will take place to try to resolve this error");
		}
		reportAsset(assetName, "error", "The OMF endpoint reported a Bad Request when sending Types");
		return false;
	}
	catch (const Unauthorized& e)
	{
		LogIds(401, "creating Type", Ids);
		Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
		return false;
	}
	catch (const Conflict &e)
	{
		// HTTP 409: an incompatible Type already exists on the endpoint;
		// handleTypeErrors bumps the type-id and retries the Type creation
		LogIds(409, "creating Type", Ids);
		string msg = "Type conflict for " + assetName + " (" + DataPointNamesAsString(row) + "). Creating a new Type";
		handleRESTException(e, msg.c_str());
		if (!OMF::handleTypeErrors(assetName, row, hints))
		{
			return false;
		}
	}
	catch (const std::exception &e)
	{
		LogIds(0, "Error creating Type", Ids);
		string msg = "An error occurred sending the Type message for the asset " + assetName;
		handleRESTException(e, msg.c_str());
		return false;
	}

	// Create header for Container
	vector<pair<string, string>> resContainer = OMF::createMessageHeader("Container");
	// Create data for Container message
	string typeContainer = OMF::createContainerData(row, hints);
	string measurementId = generateMeasurementId(assetName);

	// Parse the Container Id and the Container Type Id from the JSON payload.
	// There will be only 1.
	Ids.clear();
	std::string containerTypeId;
	parseIdFromJson(typeContainer, Ids, &containerTypeId);

	// Build an HTTPS POST with 'resContainer' headers
	// and 'typeContainer' JSON payload
	// Then get HTTPS POST ret code and return 0 to client on error
	try
	{
		res = m_sender.sendRequest("POST", m_path, resContainer, typeContainer);
		if ( ! (res >= 200 && res <= 299) )
		{
			LogIds(res, "creating Container", Ids);
			string msg = "An error occurred sending the dataType container message for the asset " + assetName + " (Type: " + containerTypeId + ")";
			msg.append(". HTTP error code " + to_string(res));
			reportAsset(assetName, "error", msg);
			return false;
		}
		else if (res == 201)
		{
			LogIds(res, std::string("Created Container (Type: " + containerTypeId + ")").c_str(), Ids);
		}
		else
		{
			LogIds(res, std::string("Confirmed Container (Type: " + containerTypeId + ")").c_str(), Ids);
		}
	}
	// Exception raised for HTTP 400 Bad Request
	catch (const BadRequest &e)
	{
		LogIds(400, "creating Container", Ids);
		string msg = "The OMF endpoint reported a Bad Request when sending Containers for the asset " + assetName + " (Type: " + containerTypeId + ")";
		handleRESTException(e, msg.c_str());
		if (OMF::isDataTypeError(e.what()))
		{
			// Data type error: force type-id change
			m_changeTypeId = true;
			Logger::getLogger()->warn("A data type change will take place to try to resolve this error");
		}
		reportAsset(assetName, "error", "The OMF endpoint reported a Bad Request when sending Containers");
		return false;
	}
	catch (const Unauthorized &e)
	{
		LogIds(401, "creating Container", Ids);
		Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
		return false;
	}
	catch (const Conflict &e)
	{
		// A Container conflict means the PI Server cannot currently store
		// data for this asset: mark the server unstable
		LogIds(409, "creating Container", Ids);
		string msg = "A Conflict occurred sending the Container message for the asset " + assetName + " (Type: " + containerTypeId + ")";
		handleRESTException(e, msg.c_str());
		Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, 409);
		m_PIstable = false;
		return false;
	}
	catch (const std::exception &e)
	{
		LogIds(0, "creating Container", Ids);
		// NOTE(review): 'error' is a freshly default-constructed OMFError;
		// from this block it is not populated before hasMessages() is
		// checked — confirm whether handleRESTException is expected to
		// fill it, otherwise this branch may be dead
		OMFError error;
		string msg = "An error occurred sending the Container message for the asset: " + assetName + " (Type: " + containerTypeId + ")";
		handleRESTException(e, msg.c_str());
		if (error.hasMessages())
		{
			for (unsigned int i = 0; i < error.messageCount(); i++)
			{
				if ((error.getHttpCode() == 500) && (0 == error.getMessage(i).compare(PIWEBAPI_PIPOINTS_NOT_CREATED)))
				{
					Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, error.getHttpCode());
					m_PIstable = false;
					break;
				}
			}
		}
		reportAsset(assetName, "error", msg);
		return false;
	}

	// Static (AF Element) data and AF links are only sent when the full
	// AF structure has been requested in the plugin configuration
	if (m_sendFullStructure)
	{
		// Create header for Static data
		vector<pair<string, string>> resStaticData = OMF::createMessageHeader("Data");

		// Create data for Static Data message
		string typeStaticData = OMF::createStaticData(row);

		// Build an HTTPS POST with 'resStaticData' headers
		// and 'typeStaticData' JSON payload
		// Then get HTTPS POST ret code and return 0 to client on error
		try
		{
			res = m_sender.sendRequest("POST", m_path, resStaticData, typeStaticData);
			if (!(res >= 200 && res <= 299))
			{
				string msg = "An error occurred creating Element " + parseNameFromJson(typeStaticData) + " for the asset " + assetName;
				msg.append(". HTTP error code " + to_string(res));
				reportAsset(assetName, "warn", msg);
				return false;
			}
			else if (res == 201)
			{
				Logger::getLogger()->info("Created Element %s", parseNameFromJson(typeStaticData).c_str());
			}
			else
			{
				Logger::getLogger()->info("Confirmed Element %s", parseNameFromJson(typeStaticData).c_str());
			}
		}
		// Exception raised for HTTP 400 Bad Request
		catch (const BadRequest& e)
		{
			string msg = "Bad Request reported when creating Element " + parseNameFromJson(typeStaticData) + " for the asset " + assetName;
			handleRESTException(e, msg.c_str());
			if (OMF::isDataTypeError(e.what()))
			{
				// Data type error: force type-id change
				m_changeTypeId = true;
				Logger::getLogger()->warn("A data type change will take place to try to resolve this error");
			}
			return false;
		}
		catch (const Unauthorized &e)
		{
			Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
			return false;
		}
		catch (const Conflict &e)
		{
			string msg = "Conflict found creating Element " + parseNameFromJson(typeStaticData) + " for the asset " + assetName;
			handleRESTException(e, msg.c_str());
			Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, 409);
			m_PIstable = false;
			return false;
		}
		catch (const std::exception &e)
		{
			string msg = "An error occurred creating Element " + parseNameFromJson(typeStaticData) + " for the asset " + assetName;
			handleRESTException(e, msg.c_str());
			return false;
		}

		// Create header for Link data
		vector<pair<string, string>> resLinkData = OMF::createMessageHeader("Data");

		// NOTE(review): this declaration shadows the function-scope
		// 'assetName' initialised from row.getAssetName() above; from
		// here on the (naming-rule adjusted) m_assetName is used instead
		string assetName = m_assetName;
		string AFHierarchyLevel;
		string prefix;
		string objectPrefix;

		auto rule = m_AssetNamePrefix.find(assetName);
		if (rule != m_AssetNamePrefix.end())
		{
			auto itemArray = rule->second;
			objectPrefix = "";

			// One link message per AF hierarchy placement of the asset
			for (auto &item : itemArray)
			{
				string AFHierarchy;
				// NOTE(review): this inner 'prefix' shadows the outer
				// declaration above; the value assigned below from the
				// tuple is the one used for the link data
				string prefix;

				AFHierarchy = std::get<0>(item);
				generateAFHierarchyPrefixLevel(AFHierarchy, prefix, AFHierarchyLevel);
				prefix = std::get<1>(item);
				// The first prefix seen is retained as the object prefix
				// for all subsequent link messages
				if (objectPrefix.empty())
				{
					objectPrefix = prefix;
				}

				// Create data for Static Data message and parse the link names for logging purposes
				string typeLinkData = OMF::createLinkData(row, AFHierarchyLevel, prefix, objectPrefix, hints, true);
				string payload = "[" + typeLinkData + "]";
				std::vector<std::pair<std::string, std::string>> links;
				parseLinkData(payload, links);

				// Build an HTTPS POST with 'resLinkData' headers
				// and 'typeLinkData' JSON payload
				// Then get HTTPS POST ret code and return 0 to client on error
				try
				{
					res = m_sender.sendRequest("POST", m_path, resLinkData, payload);
					if (!(res >= 200 && res <= 299))
					{
						LogLinks(res, "creating Link", links);
						string msg = "An error occurred sending the link Data message for the asset " + assetName;
						msg.append(". HTTP error code " + to_string(res));
						reportAsset(assetName, "warn", msg);
						return false;
					}
					else
					{
						LogLinks(res, (res == 201) ? "Created Link" : "Confirmed Link", links);
					}
				}
				// Exception raised for HTTP 400 Bad Request
				catch (const BadRequest &e)
				{
					LogLinks(400, "creating Link", links);
					string msg = "The OMF endpoint reported a Bad Request when sending link Data for the asset" + assetName;
					handleRESTException(e, msg.c_str());
					if (OMF::isDataTypeError(e.what()))
					{
						// Data type error: force type-id change
						m_changeTypeId = true;
						Logger::getLogger()->warn("A data type change will take place to try to resolve this error");
					}
					reportAsset(assetName, "warn", msg);
					return false;
				}
				catch (const Unauthorized &e)
				{
					LogLinks(401, "creating Link", links);
					Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
					return false;
				}
				catch (const Conflict &e)
				{
					LogLinks(409, "creating Link", links);
					string msg = "Conflict found sending the link Data message for the asset " + assetName;
					handleRESTException(e, msg.c_str());
					Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, 409);
					m_PIstable = false;
					return false;
				}
				catch (const std::exception &e)
				{
					LogLinks(0, "creating Link", links);
					string msg = "An error occurred sending the link Data message for the asset " + assetName;
					handleRESTException(e, msg.c_str());
					return false;
				}
			}
		}
		else
		{
			string msg("AF hierarchy is not defined for the asset " + assetName);
			reportAsset(assetName, "warn", msg);
		}
	}

	// All data types sent: success
	return true;
}

/**
 * AFHierarchy - send an OMF message
 *
 * @param msgType	message type : Type, Data
 * @param jsonData	OMF message to send
 * @param action	action to be executed, either "create" or "delete"
 * @return		true if succeeded
 */
bool OMF::AFHierarchySendMessage(const string& msgType, string& jsonData, const std::string& action)
{
	bool success = true;
	int res = 0;
	string errorMessage;

	vector<pair<string, string>> resType = OMF::createMessageHeader(msgType, action);
	try
	{
		res = m_sender.sendRequest("POST", m_path, resType, jsonData);
		if ( !
(res >= 200 && res <= 299) )
		{
			success = false;
		}
		else if (msgType.compare("Data") == 0)
		{
			// Data messages may be either AF Elements (named) or links
			// (no name in the payload); log accordingly
			std::string name = parseNameFromJson(jsonData);
			if (name.empty())
			{
				std::vector<std::pair<std::string, std::string>> links;
				parseLinkData(jsonData, links);
				LogLinks(res, (res == 201) ? "Created Link" : "Confirmed Link", links);
			}
			else
			{
				Logger::getLogger()->info((res == 201) ? "Created Element %s" : "Confirmed Element %s", name.c_str());
			}
		}
		else if (msgType.compare("Type") == 0)
		{
			std::vector<std::string> typeIds;
			parseIdFromJson(jsonData, typeIds);
			LogIds(res, (res == 201) ? "Created Type" : "Confirmed Type", typeIds);
		}
	}
	catch (const BadRequest& ex)
	{
		errorMessage = "The OMF endpoint reported a Bad Request when sending AF hierarchy";
		handleRESTException(ex, errorMessage.c_str());
		success = false;
	}
	catch (const std::exception &ex)
	{
		handleRESTException(ex, "Error sending AF hierarchy");
		errorMessage = ex.what();
		success = false;
	}

	if (! success)
	{
		string errorMsg = errorMessageHandler(errorMessage);

		// res == 0 means the request never completed, so there is no
		// HTTP code to report
		if (res != 0)
			Logger::getLogger()->error("Sending Asset Framework hierarchy, %d %s - %s %s",
						   res,
						   errorMsg.c_str(),
						   m_sender.getHostPort().c_str(),
						   m_path.c_str());
		else
			Logger::getLogger()->error("Sending Asset Framework hierarchy, %s - %s %s",
						   errorMsg.c_str(),
						   m_sender.getHostPort().c_str(),
						   m_path.c_str());
	}
	return success;
}

/**
 * AFHierarchy - handles OMF types definition
 *
 * Sends the OMF Type message for a single AF hierarchy level, built from
 * the AF_HIERARCHY_1LEVEL_TYPE template.
 *
 * @param AFHierarchyLevel	Name of the hierarchy level
 * @param prefix		Unique prefix used to build the type id
 * @return			true if succeeded
 */
bool OMF::sendAFHierarchyTypes(const std::string AFHierarchyLevel, const std::string prefix)
{
	bool success;
	string jsonData;
	string tmpStr;

	jsonData = "";
	tmpStr = AF_HIERARCHY_1LEVEL_TYPE;
	StringReplace(tmpStr, "_placeholder_typeid_", prefix + "_" + AFHierarchyLevel + "_typeid");
	jsonData.append(tmpStr);

	// NOTE(review): called without the 'action' argument — relies on the
	// default declared for AFHierarchySendMessage; confirm in the header
	success = AFHierarchySendMessage("Type", jsonData);
	return success;
}

/**
 * AFHierarchy - handles OMF static data
 *
 * Sends the OMF Data message that creates the AF Element for a single
 * hierarchy level, built from the AF_HIERARCHY_1LEVEL_STATIC template.
 *
 * @param AFHierarchyLevel	Name of the hierarchy level
 * @param prefix		Unique prefix used to build the type and asset ids
 * @return			true if succeeded
 */
bool OMF::sendAFHierarchyStatic(const std::string AFHierarchyLevel, const std::string prefix)
{
	bool success;
	string jsonData;
	string tmpStr;

	jsonData = "";
	tmpStr = AF_HIERARCHY_1LEVEL_STATIC;
	StringReplace(tmpStr, "_placeholder_typeid_", prefix + "_" + AFHierarchyLevel + "_typeid");
	StringReplace(tmpStr, "_placeholder_Name_", AFHierarchyLevel);
	StringReplace(tmpStr, "_placeholder_AssetId_", prefix + "_" + AFHierarchyLevel);
	jsonData.append(tmpStr);
	success = AFHierarchySendMessage("Data", jsonData);
	return success;
}

/**
 * AFHierarchy - creates the link between 2 elements in the AF hierarchy
 *
 * @param parent		Name of the parent hierarchy level
 * @param child			Name of the child hierarchy level
 * @param prefixIdParent	Unique prefix of the parent level
 * @param prefixId		Unique prefix of the child level
 * @return			true if succeeded
 */
bool OMF::sendAFHierarchyLink(std::string parent, std::string child, std::string prefixIdParent, std::string prefixId)
{
	bool success;
	string jsonData;
	string tmpStr;

	jsonData = "";
	tmpStr = AF_HIERARCHY_LEVEL_LINK;
	StringReplace(tmpStr, "_placeholder_src_type_", prefixIdParent + "_" + parent + "_typeid");
	StringReplace(tmpStr, "_placeholder_src_idx_", prefixIdParent + "_" + parent );
	StringReplace(tmpStr, "_placeholder_tgt_type_", prefixId + "_" + child + "_typeid");
	StringReplace(tmpStr, "_placeholder_tgt_idx_", prefixId + "_" + child);
	jsonData.append(tmpStr);
	success = AFHierarchySendMessage("Data", jsonData);
	return success;
}

/**
 * AFHierarchy - creates or delete the link between 2 elements in the AF
 * hierarchy in relation to the parameter action
 *
 * @param parent		Name of the parent hierarchy level
 * @param child			Name of the child element (e.g. an asset)
 * @param prefixIdParent	Unique prefix of the parent level
 * @param prefixId		Unique prefix of the child element
 * @param childFull		Full target type id; when empty it is derived
 *				from prefixId and child
 * @param action		Either "create" or "delete"
 * @return			true if succeeded
 */
bool OMF::manageAFHierarchyLink(std::string parent, std::string child, std::string prefixIdParent, std::string prefixId, std::string childFull, string action)
{
	bool success;
	string jsonData;
	string tmpStr;

	jsonData = "";
	tmpStr = AF_HIERARCHY_LEVEL_LINK;
	StringReplace(tmpStr, "_placeholder_src_type_", prefixIdParent + "_" + parent + "_typeid");
	StringReplace(tmpStr, "_placeholder_src_idx_", prefixIdParent + "_" + parent );
	if (childFull.empty())
	{
		StringReplace(tmpStr, "_placeholder_tgt_type_", prefixId + "_" + child + "_typeid");
	}
	else
	{
		StringReplace(tmpStr, "_placeholder_tgt_type_", childFull);
	}
	// NOTE(review): unlike sendAFHierarchyLink, the target index here is
	// prefixed with "A_" — presumably the asset-element id convention;
	// confirm against the id generation code
	StringReplace(tmpStr, "_placeholder_tgt_idx_", "A_" + prefixId + "_" + child);
	jsonData.append(tmpStr);
	success = AFHierarchySendMessage("Data", jsonData, action);
	return success;
}

/**
 * AFHierarchy - delete the link between 2 elements in the AF hierarchy
 *
 * @param assetName	Name of the asset to unlink
 * @param path		AF hierarchy path of the parent
 * @return		true if succeeded
 */
bool OMF::deleteAssetAFH(const string& assetName, string& path)
{
	std::string pathLastLevel, pathPrefixId, assetNamePrefixId, assetNameFullId;

	assetNamePrefixId = getHashStored(assetName);
	generateAFHierarchyPrefixLevel(path, pathPrefixId, pathLastLevel);
	setAssetTypeTagNew(assetName, "typename_sensor", assetNameFullId);

	Logger::getLogger()->debug("%s - assetName :%s: childPrefixId :%s: pathStored :%s: parentLastLevel :%s: parentPrefixId :%s: childFull :%s:"
		, __FUNCTION__
		, assetName.c_str()
		, assetNamePrefixId.c_str()
		, path.c_str()
		, pathLastLevel.c_str()
		, pathPrefixId.c_str()
		, assetNameFullId.c_str() );

	return manageAFHierarchyLink(pathLastLevel, assetName, pathPrefixId, assetNamePrefixId, assetNameFullId, "delete");
}

/**
 * AFHierarchy - create the link between 2 elements in the AF hierarchy
 *
 * Mirror image of deleteAssetAFH: same id derivation, but the link is
 * created rather than deleted.
 *
 * @param assetName	Name of the asset to link
 * @param path		AF hierarchy path of the parent
 * @return		true if succeeded
 */
bool OMF::createAssetAFH(const string& assetName, string& path)
{
	std::string pathLastLevel, pathPrefixId, assetNamePrefixId, assetNameFullId;

	assetNamePrefixId = getHashStored(assetName);
	generateAFHierarchyPrefixLevel(path, pathPrefixId, pathLastLevel);
	setAssetTypeTagNew(assetName, "typename_sensor", assetNameFullId);

	Logger::getLogger()->debug("%s - assetName :%s: childPrefixId :%s: pathStored :%s: pathLastLevel :%s: pathPrefixId :%s: childFull :%s:"
		, __FUNCTION__
		, assetName.c_str()
		, assetNamePrefixId.c_str()
		, path.c_str()
		, pathLastLevel.c_str()
		, pathPrefixId.c_str()
		, assetNameFullId.c_str() );

	return manageAFHierarchyLink(pathLastLevel, assetName, pathPrefixId, assetNamePrefixId, assetNameFullId, "create");
}

/**
 * Creates the hierarchies tree in the AF as defined in the configuration
 * item DefaultAFLocation, each level is separated by /
 * the implementation is available for PI Web API only
 * The hierarchy is created/recreated if an OMF type message is sent
 *
 * @return	true if succeeded
 */
bool OMF::handleAFHierarchySystemWide()
{
	bool success = true;
	std::string level;
	std::string previousLevel;
	string parentPath;

	parentPath = evaluateParentPath(m_DefaultAFLocation, AFHierarchySeparator);
	success = sendAFHierarchyLevels(parentPath, m_DefaultAFLocation, m_AFHierarchyLevel);
	return success;
}

/**
 * Creates all the AF hierarchies levels as requested by the input parameter
 * Creates the AF hierarchy if it was not already created
 *
 * @param AFHierarchy	Hierarchies levels to be created as relative or absolute path
 * @return out		true if succeeded
 */
bool OMF::sendAFHierarchy(string AFHierarchy)
{
	bool success = true;
	string path;
	string dummy;
	string parentPath;

	// Each distinct hierarchy path is created once per session only
	if(find(m_afhHierarchyAlreadyCreated.begin(), m_afhHierarchyAlreadyCreated.end(), AFHierarchy) == m_afhHierarchyAlreadyCreated.end()){

		// NOTE(review): .at(0) throws std::out_of_range if AFHierarchy is
		// empty — confirm callers never pass an empty path
		if (AFHierarchy.at(0) == '/')
		{
			// Absolute path
			path = AFHierarchy;
			parentPath = evaluateParentPath(path, AFHierarchySeparator);
		}
		else
		{
			// relative path, anchored under the configured default AF location
			path = m_DefaultAFLocation + "/" + AFHierarchy;
			parentPath = m_DefaultAFLocation;
		}
		m_afhHierarchyAlreadyCreated.push_back(AFHierarchy);

		// Intentional assignment in the condition: success records the
		// outcome of the send and is also the branch condition
		if (success = sendAFHierarchyLevels(parentPath, path, dummy))
		{
			Logger::getLogger()->debug("%s - path created :%s:", __FUNCTION__, AFHierarchy.c_str() );
		}
	} else {
		Logger::getLogger()->debug("%s - path already created :%s:", __FUNCTION__, AFHierarchy.c_str() );
	}
	return success;
}

/**
 * Creates all the AF hierarchies level as requested by the input parameter
 *
 * @param parentPath	Parent path
 * @param path		Full path of hierarchies to create
 * @param lastLevel	last level of the created hierarchy (output)
 * @return		true if succeeded
 */
bool OMF::sendAFHierarchyLevels(string parentPath, string path, std::string &lastLevel)
{
	bool success = true;
	std::string level;
	std::string previousLevel;

	// Unescape the path, temporarily protecting escaped slashes so that
	// they are not treated as level separators below
	StringReplaceAll(path, AFH_ESCAPE_SEQ ,AFH_ESCAPE_CHAR);
	StringReplaceAll(path, AFH_SLASH_ESCAPE ,AFH_SLASH_ESCAPE_TMP);

	if (path.find(AFHierarchySeparator) == string::npos)
	{
		string prefixId;

		// only 1 single level of hierarchy
		StringReplaceAll(path, AFH_SLASH_ESCAPE_TMP ,AFH_SLASH);
		prefixId = generateUniquePrefixId(path);

		success = sendAFHierarchyTypes(path, prefixId);
		if (success)
		{
			success = sendAFHierarchyStatic(path,prefixId);
		}
		lastLevel = path;
	}
	else
	{
		string pathFixed;
		string parentPathFixed;
		string prefixId;
		string prefixIdParent;
		string previousLevelPath;
		string AFHierarchyLevel;
		string levelPath;

		pathFixed = StringSlashFix(path);
		std::stringstream pathStream(pathFixed);

		// multiple hierarchy levels: for each level send Type and Static
		// messages, then link it to its parent level
		while (std::getline(pathStream, level, AFHierarchySeparator))
		{
			StringReplaceAll(level, AFH_SLASH_ESCAPE_TMP ,AFH_SLASH);
			levelPath = previousLevelPath + AFHierarchySeparator + level;
			levelPath = StringSlashFix(levelPath);
			prefixId = generateUniquePrefixId(levelPath);

			if (!sendAFHierarchyTypes(level, prefixId))
			{
				return false;
			}
			if (!sendAFHierarchyStatic(level, prefixId))
			{
				return false;
			}
			// Creates the link between the AF level
			if (previousLevel != "")
			{
				parentPathFixed = StringSlashFix(previousLevelPath);
				prefixIdParent = generateUniquePrefixId(parentPathFixed);
				if (!sendAFHierarchyLink(previousLevel, level, prefixIdParent, prefixId))
				{
					return false;
				}
			}
			previousLevelPath = levelPath;
			previousLevel = level;
		}
		lastLevel = level;
	}
	return success;
}

/**
 * Handle the creation of AF hierarchies
 *
 * The AF hierarchy only exists on PI Web API endpoints; for any other
 * endpoint this is a no-op that reports success.
 *
 * @return	true if succeeded
 */
bool OMF::handleAFHierarchy()
{
	bool success = true;
	if (m_PIServerEndpoint == ENDPOINT_PIWEB_API)
	{
		success = handleAFHierarchySystemWide();
	}
	return success;
}

/**
 * Sets the value of the prefix used for the objects naming
 *
 * Derives m_AFHierarchyLevel (the last level of the default AF location)
 * from m_DefaultAFLocation; PI Web API endpoints only.
 */
void OMF::setAFHierarchy()
{
	std::string level;
	std::string AFLocation;

	AFLocation = m_DefaultAFLocation;
	if (m_PIServerEndpoint == ENDPOINT_PIWEB_API)
	{
		// Implementation only for PI Web API
		StringReplaceAll(AFLocation, AFH_ESCAPE_SEQ, AFH_ESCAPE_CHAR);
		StringReplaceAll(AFLocation, AFH_SLASH_ESCAPE ,AFH_SLASH_ESCAPE_TMP);

		std::stringstream defaultAFLocation(AFLocation);
		if (AFLocation.find(AFHierarchySeparator) == string::npos)
		{
			// only 1 single level of hierarchy
			m_AFHierarchyLevel = AFLocation;
		}
else { // multiple hierarchy levels while (std::getline(defaultAFLocation, level, AFHierarchySeparator)) { ; } m_AFHierarchyLevel = level; } StringReplaceAll(m_AFHierarchyLevel, AFH_SLASH_ESCAPE_TMP ,AFH_SLASH); } } /** * Send all the readings to the PI Server * * @param readings A vector of readings data pointers * @param skipSendDataTypes Send datatypes only once (default is true) * @param compression If true, compress JSON payload before sending to PI * @return Number of readings sent on success, 0 otherwise */ uint32_t OMF::sendToServer(const vector<Reading *>& readings, bool compression, bool skipSentDataTypes) { bool AFHierarchySent = false; bool sendLinkedTypes = false; bool sendDataTypes; string keyComplete; string AFHierarchyPrefix; string AFHierarchyLevel; string measurementId; #if INSTRUMENT ostringstream threadId; threadId << std::this_thread::get_id(); struct timeval start, t1, t2, t3, t4, t5; gettimeofday(&start, NULL); #endif if (m_linkedProperties && m_baseTypesSent == false) { if (!sendBaseTypes() || !sendFledgeAssetType()) { if (!m_connected || !m_PIstable) { return 0; } Logger::getLogger()->error("Unable to send base types, linked assets will not be sent. The system will fall back to using complex types."); m_linkedProperties = false; } else { m_baseTypesSent = true; } } // TODO We do not need the superset stuff if we are using linked data types, // this would save us iterating over the data an extra time and reduce our // memory footprint // // Create a superset of all the datapoints for each assetName // the superset[assetName] is then passed to routines which handles // creation of OMF data types. This is used for the initial type // handling of complex data types. 
OMF::setMapObjectTypes(readings, m_SuperSetDataPoints); #if INSTRUMENT gettimeofday(&t1, NULL); #endif // Applies the PI-Server naming rules to the AF hierarchy { bool changed = false; string origDefaultAFLocation; origDefaultAFLocation = m_DefaultAFLocation; m_DefaultAFLocation = ApplyPIServerNamingRulesPath(m_DefaultAFLocation, &changed); if (changed) { Logger::getLogger()->info("%s - AF hierarchy changed to follow PI-Server naming rules from :%s: to :%s:", __FUNCTION__, origDefaultAFLocation.c_str(), m_DefaultAFLocation.c_str() ); } } /* * Iterate over readings: * - Send/cache Types * - transform a reading to OMF format * - add OMF data to new vector */ // Used for logging string json_not_compressed; string OMFHintAFHierarchyTmp; string OMFHintAFHierarchy; // Create the class that deals with the linked data generation OMFLinkedData linkedData(&m_linkedAssetState, m_PIServerEndpoint); linkedData.setSendFullStructure(m_sendFullStructure); linkedData.setDelimiter(m_delimiter); linkedData.setFormats(getFormatType(OMF_TYPE_FLOAT), getFormatType(OMF_TYPE_INTEGER)); linkedData.setStaticData(m_staticData); // Create the lookup data for this block of readings linkedData.buildLookup(readings); unsigned int idx = 0; std::size_t blockSize = readings.size() / m_numBlocks; for (std::size_t i = 0; i < readings.size(); i += blockSize) { OMFBuffer payload; payload.append('['); bool pendingSeparator = false; for (std::size_t j = i; j < (i + blockSize) && (j < readings.size()); ++j) { Reading *reading = readings[j]; OMFHintAFHierarchy = ""; Logger::getLogger()->debug("sendToServer[%u/%u]: %s (%s)", idx++, readings.size(), reading->getAssetName().c_str(), DataPointNamesAsString(*reading).c_str()); // Fetch and parse any OMFHint for this reading Datapoint *hintsdp = reading->getDatapoint("OMFHint"); OMFHints *hints = NULL; bool usingTagHint = false; long typeId = 0; if (hintsdp) { hints = new OMFHints(hintsdp->getData().toString()); const vector<OMFHint *> omfHints = 
hints->getHints(); for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { if (typeid(**it) == typeid(OMFTagHint)) { Logger::getLogger()->debug("Using OMF Tag hint: %s", (*it)->getHint().c_str()); keyComplete.append("_" + (*it)->getHint()); usingTagHint = true; } else if (typeid(**it) == typeid(OMFAFLocationHint)) { OMFHintAFHierarchyTmp = (*it)->getHint(); OMFHintAFHierarchy = variableValueHandle(*reading, OMFHintAFHierarchyTmp); Logger::getLogger()->debug("%s - OMF AFHierarchy original value :%s: new :%s:" ,__FUNCTION__ ,OMFHintAFHierarchyTmp.c_str() ,OMFHintAFHierarchy.c_str() ); } else if (typeid(**it) == typeid(OMFLegacyTypeHint)) { Logger::getLogger()->warn("OMFHint LegacyType has been deprecated. The hint value '%s' will be ignored.", (*it)->getHint().c_str()); } } } // Applies the PI-Server naming rules to the AssetName { bool changed; string assetNameFledge; assetNameFledge = reading->getAssetName(); m_assetName = ApplyPIServerNamingRulesObj(assetNameFledge, &changed); if (changed) { Logger::getLogger()->info("%s - 3 Asset name changed to follow PI-Server naming rules from :%s: to :%s:", __FUNCTION__, assetNameFledge.c_str(), m_assetName.c_str() ); } } // Since hints are attached to individual readings that are processed by the north plugin if an AFLocation // hint is present it will override any default AFLocation or AF Location rules defined in the north plugin configuration. if ( ! 
createAFHierarchyOmfHint(m_assetName, OMFHintAFHierarchy) ) { if (!evaluateAFHierarchyRules(m_assetName, *reading)) { return 0; } } if (m_PIServerEndpoint == ENDPOINT_CR || m_PIServerEndpoint == ENDPOINT_ADH || m_PIServerEndpoint == ENDPOINT_OCS || m_PIServerEndpoint == ENDPOINT_EDS ) { keyComplete = m_assetName; } else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { if (getNamingScheme(m_assetName) == NAMINGSCHEME_CONCISE) { keyComplete = m_assetName; } else { retrieveAFHierarchyPrefixAssetName(m_assetName, AFHierarchyPrefix, AFHierarchyLevel); keyComplete = AFHierarchyPrefix + "_" + m_assetName; } } if (! AFHierarchySent) { setAFHierarchy(); } // Use old style complex types if the user has forced it via configuration, // we are running against an EDS endpoint or Connector Relay or we have types defined for this // asset already if (m_legacy || m_PIServerEndpoint == ENDPOINT_EDS || m_PIServerEndpoint == ENDPOINT_CR || m_OMFDataTypes->find(keyComplete) != m_OMFDataTypes->end()) { // Legacy type support if (! usingTagHint) { /* * Check the OMFHints, if there are any, to see if we have a * type name that should be used for this asset. * We will still create the type, but the name will be fixed * as the value of this hint. */ bool usingTypeNameHint = false; if (hints) { const vector<OMFHint *> omfHints = hints->getHints(); for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { if (typeid(**it) == typeid(OMFTypeNameHint)) { Logger::getLogger()->debug("Using OMF TypeName hint: %s", (*it)->getHint().c_str()); keyComplete.append("_" + (*it)->getHint()); usingTypeNameHint = true; break; } } } auto it = m_SuperSetDataPoints.find(m_assetName); if (it == m_SuperSetDataPoints.end()) { // The asset has only unsupported properties, so it is ignored continue; } sendDataTypes = (skipSentDataTypes == true) ? 
// Send if not already sent !OMF::getCreatedTypes(keyComplete, *reading, hints) : // Always send types true; Reading* datatypeStructure = NULL; if (sendDataTypes && !usingTypeNameHint) { // Increment type-id of assetName in memory cache OMF::incrementAssetTypeIdOnly(keyComplete); // Remove data and keep type-id OMF::clearCreatedTypes(keyComplete); // Get the supersetDataPoints for current assetName auto it = m_SuperSetDataPoints.find(m_assetName); if (it != m_SuperSetDataPoints.end()) { datatypeStructure = (*it).second; } } if (m_sendFullStructure) { // The AF hierarchy is created/recreated if an OMF type message is sent // it sends the hierarchy once if (sendDataTypes and !AFHierarchySent) { if (!handleAFHierarchy()) { return 0; } AFHierarchySent = true; } } if (usingTypeNameHint) { if (sendDataTypes && !OMF::handleDataTypes(keyComplete, *reading, skipSentDataTypes, hints)) { // Failure return 0; } } else { // Check first we have supersetDataPoints for the current reading if ((sendDataTypes && datatypeStructure == NULL) || // Handle the data types of the current reading (sendDataTypes && // Send data type !OMF::handleDataTypes(keyComplete, *datatypeStructure, skipSentDataTypes, hints) && // Data type not sent: (!m_changeTypeId || // Increment type-id and re-send data types !OMF::handleTypeErrors(keyComplete, *datatypeStructure, hints)))) { // Remove all assets supersetDataPoints OMF::unsetMapObjectTypes(m_SuperSetDataPoints); // Failure return 0; } } // Create the key for dataTypes sending once typeId = OMF::getAssetTypeId(m_assetName); } measurementId = generateMeasurementId(m_assetName); if (OMFData(payload, *reading, measurementId, pendingSeparator, m_PIServerEndpoint, AFHierarchyPrefix, hints).hasData()) { pendingSeparator = true; } sendLinkedTypes = false; } else { // We do this before the send so we know if it was sent for the first time // in the processReading call auto lookup = m_linkedAssetState.find(m_assetName + m_delimiter); // Send data for this 
reading using the new mechanism if (linkedData.processReading(payload, pendingSeparator, *reading, AFHierarchyPrefix, hints)) pendingSeparator = true; sendLinkedTypes = true; } if (hints) { delete hints; } } // end 'for' one block of Readings #if INSTRUMENT gettimeofday(&t2, NULL); #endif payload.append(']'); // TODO Improve this with coalesceCompressed call and avoid string on the stack // and avoid copy into a string const char *omfData = payload.coalesce(); #if INSTRUMENT gettimeofday(&t3, NULL); #endif vector<pair<string, string>> containerHeader = OMF::createMessageHeader("Container"); OMFError omfError; if (!linkedData.flushContainers(m_sender, m_path, containerHeader, omfError, &m_connected)) { if (omfError.hasMessages()) { // Exit immediately if attempting to create PI Points results in HTTP 409 (Conflict) // or HTTP 500 (Internal Server Error) with a specific error message. // Both mean that processing cannot continue because the PI Server cannot store data. int httpCode = omfError.getHttpCode(); for (unsigned int i = 0; i < omfError.messageCount(); i++) { if ((httpCode == 409) || ((httpCode == 500) && (0 == omfError.getMessage(i).compare(PIWEBAPI_PIPOINTS_NOT_CREATED)))) { Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, httpCode); m_PIstable = false; break; } } } return 0; } /** * Types messages sent, now transform each reading to OMF format. * * After formatting the new vector of data can be sent * with one message only */ // Create header for Readings data vector<pair<string, string>> readingData = OMF::createMessageHeader("Data", m_dataActionCode); if (compression) readingData.push_back(pair<string, string>("compression", "gzip")); // Build an HTTPS POST with 'readingData headers // and 'allReadings' JSON payload // Then get HTTPS POST ret code and return 0 to client on error try { int res = m_sender.sendRequest("POST", m_path, readingData, compression ? compress_string(omfData) : omfData); if ( ! 
(res >= 200 && res <= 299) ) { Logger::getLogger()->error("Sending JSON readings , " "- error: HTTP code |%d| - %s %s", res, m_sender.getHostPort().c_str(), m_path.c_str() ); delete[] omfData; return 0; } #if INSTRUMENT gettimeofday(&t4, NULL); #endif #if INSTRUMENT struct timeval tm; double timeT1, timeT2, timeT3, timeT4, timeT5; timersub(&t1, &start, &tm); timeT1 = tm.tv_sec + ((double)tm.tv_usec / 1000000); timersub(&t2, &t1, &tm); timeT2 = tm.tv_sec + ((double)tm.tv_usec / 1000000); timersub(&t3, &t2, &tm); timeT3 = tm.tv_sec + ((double)tm.tv_usec / 1000000); timersub(&t4, &t3, &tm); timeT4 = tm.tv_sec + ((double)tm.tv_usec / 1000000); timersub(&t5, &t4, &tm); timeT5 = tm.tv_sec + ((double)tm.tv_usec / 1000000); Logger::getLogger()->warn("Timing seconds - thread %s - superSet %6.3f - Loop %6.3f - compress %6.3f - send data %6.3f - readings %d - msg size %d", threadId.str().c_str(), timeT1, timeT2, timeT3, timeT4, readings.size(), strlen(omfData) ); #endif delete[] omfData; } // Exception raised for HTTP 400 Bad Request catch (const BadRequest& e) { OMFError error(m_sender.getHTTPResponse()); error.Log("The OMF endpoint reported a Bad Request when sending data"); if (OMF::isDataTypeError(e.what())) { // Some assets have invalid or redefined data type // NOTE: // // 1- We consider this a NOT blocking issue. // 2- Type-id is not incremented // 3- Data Types cache is cleared: next sendData call // will send data types again. 
string errorMsg = errorMessageHandler(e.what()); Logger::getLogger()->warn("Sending JSON readings, " "not blocking issue: %s - %s %s", errorMsg.c_str(), m_sender.getHostPort().c_str(), m_path.c_str()); // Extract assetName from error message string assetName; if (m_PIServerEndpoint == ENDPOINT_CR) { assetName = OMF::getAssetNameFromError(e.what()); } else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { // Currently not implemented/supported as PI WEB API does not // report in the error message the asset causing the problem assetName = ""; } if (assetName.empty()) { Logger::getLogger()->warn("Sending JSON readings, " "not blocking issue: assetName not found in error message, " " no types redefinition"); } else { // Remove data and keep type-id OMF::clearCreatedTypes(assetName); Logger::getLogger()->warn("Sending JSON readings, " "not blocking issue: 'type-id' of assetName '%s' " "has been set to %d " "- %s %s", assetName.c_str(), OMF::getAssetTypeId(assetName), m_sender.getHostPort().c_str(), m_path.c_str() ); } delete[] omfData; // It returns size instead of 0 as the rows in the block should be skipped in case of an error // as it is considered a not blocking ones. 
return readings.size(); } else { string errorMsg = errorMessageHandler(e.what()); Logger::getLogger()->error("Sending JSON data error : %s - %s %s", errorMsg.c_str(), m_sender.getHostPort().c_str(), m_path.c_str() ); delete[] omfData; } // Failure return 0; } catch (const Unauthorized &e) { Logger::getLogger()->error(MESSAGE_UNAUTHORIZED); delete[] omfData; return 0; } catch (const Conflict& e) { handleRESTException(e, "Conflict sending Data"); std::vector<std::pair<std::string, std::string>> links; parseLinkData(omfData, links); LogLinks(409, "creating Link", links); Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, 409); m_PIstable = false; delete[] omfData; return 0; } catch (const std::exception &e) { handleRESTException(e, "Error sending Data"); std::vector<std::pair<std::string, std::string>> links; parseLinkData(omfData, links); LogLinks(0, "creating Link", links); linkedData.clearLALookup(readings, i, i + blockSize, m_delimiter); delete[] omfData; return 0; } } // end 'for' all blocks of Readings // Create the AF Links between assets if AF structure creation with linked types is requested if (sendLinkedTypes && m_sendFullStructure) { for (Reading *reading : readings) { OMFHints *hints = NULL; Datapoint *hintsdp = reading->getDatapoint("OMFHint"); if (hintsdp) { hints = new OMFHints(hintsdp->getData().toString()); } m_assetName = ApplyPIServerNamingRulesObj(reading->getAssetName(), nullptr); auto lookup = m_linkedAssetState.find(m_assetName + m_delimiter); if (lookup->second.afLinkState() == false) { // If the hierarchy has not already been sent then send it if (!AFHierarchySent) { if (!handleAFHierarchy()) { delete hints; return 0; } AFHierarchySent = true; } if (!sendAFLinks(*reading, hints)) { delete hints; return 0; } lookup->second.afLinkSent(); } delete hints; } } // Remove all assets supersetDataPoints OMF::unsetMapObjectTypes(m_SuperSetDataPoints); // Return number of sent readings to the caller return readings.size(); } /** * Apply a handling on the 
error message in relation to the End Point * */ string OMF::errorMessageHandler(const string &msg) { string errorMsg; if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { PIWebAPI piWeb; errorMsg = piWeb.errorMessageHandler(msg); } else { errorMsg = msg; } return(errorMsg); } /** * Send all the readings to the PI Server. * Note: this overload is never called. * * @param readings A vector of readings data * @param skipSendDataTypes Send datatypes only once (default is true) * @return Number of readings sent on success, 0 otherwise */ uint32_t OMF::sendToServer(const vector<Reading>& readings, bool skipSentDataTypes) { /* * Iterate over readings: * - Send/cache Types * - transform a reading to OMF format * - add OMF data to new vector */ string measurementId; OMFBuffer payload; payload.append('['); // Fetch Reading data for (vector<Reading>::const_iterator elem = readings.begin(); elem != readings.end(); ++elem) { bool sendDataTypes; OMFHints *hints = NULL; Datapoint *hintsdp = elem->getDatapoint(OMF_HINT); if (hintsdp) { hints = new OMFHints(hintsdp->getData().toString()); } // Create the key for dataTypes sending once m_assetName = ApplyPIServerNamingRulesObj((*elem).getAssetName(), nullptr); long typeId = OMF::getAssetTypeId(m_assetName); string key(m_assetName); measurementId = generateMeasurementId(m_assetName); sendDataTypes = (skipSentDataTypes == true) ? 
// Send if not already sent !OMF::getCreatedTypes(key, (*elem), hints) : // Always send types true; // Handle the data types of the current reading if (sendDataTypes && !OMF::handleDataTypes(key, *elem, skipSentDataTypes, hints)) { // Failure return 0; } // Add into JSON string the OMF transformed Reading data if (OMFData(payload, *elem, measurementId, false, m_PIServerEndpoint, m_AFHierarchyLevel, hints).hasData()) if (elem < (readings.end() -1 )) payload.append(','); } payload.append(']'); // Build headers for Readings data vector<pair<string, string>> readingData = OMF::createMessageHeader("Data"); const char *omfData = payload.coalesce(); // Build an HTTPS POST with 'readingData headers and 'allReadings' JSON payload // Then get HTTPS POST ret code and return 0 to client on error try { int res = m_sender.sendRequest("POST", m_path, readingData, omfData); if ( ! (res >= 200 && res <= 299) ) { Logger::getLogger()->error("Sending JSON readings data " "- error: HTTP code |%d| - HostPort |%s| - path |%s| - OMF message |%s|", res, m_sender.getHostPort().c_str(), m_path.c_str(), omfData); delete[] omfData; return 0; } } catch (const std::exception& e) { Logger::getLogger()->error("Sending JSON readings data " "- generic error: |%s| - HostPort |%s| - path |%s| - OMF message |%s|", e.what(), m_sender.getHostPort().c_str(), m_path.c_str(), omfData); delete[] omfData; return false; } delete[] omfData; // Return number of sent readings to the caller return readings.size(); } /** * Send a single reading to the PI Server. * Note: this overload is never called. * * @param reading A reading to send * @return 1 = on success, 0 otherwise */ uint32_t OMF::sendToServer(const Reading& reading, bool skipSentDataTypes) { return OMF::sendToServer(&reading, skipSentDataTypes); } /** * Send a single reading pointer to the PI Server. * Note: this overload is never called. 
* * @param reading A reading pointer to send * @return 1 = on success, 0 otherwise */ uint32_t OMF::sendToServer(const Reading* reading, bool skipSentDataTypes) { string measurementId; OMFBuffer payload; payload.append('['); m_assetName = ApplyPIServerNamingRulesObj(reading->getAssetName(), nullptr); string key(m_assetName); measurementId = generateMeasurementId(m_assetName); Datapoint *hintsdp = reading->getDatapoint("OMFHint"); OMFHints *hints = NULL; if (hintsdp) { hints = new OMFHints(hintsdp->getData().toString()); } if (!OMF::handleDataTypes(key, *reading, skipSentDataTypes, hints)) { // Failure return 0; } long typeId = OMF::getAssetTypeId(m_assetName); // Add into JSON string the OMF transformed Reading data OMFData(payload, *reading, measurementId, false, m_PIServerEndpoint, m_AFHierarchyLevel, hints); payload.append(']'); // Build headers for Readings data vector<pair<string, string>> readingData = OMF::createMessageHeader("Data"); const char *omfData = payload.coalesce(); // Build an HTTPS POST with 'readingData headers and 'allReadings' JSON payload // Then get HTTPS POST ret code and return 0 to client on error try { int res = m_sender.sendRequest("POST", m_path, readingData, omfData); if ( ! 
(res >= 200 && res <= 299) ) { Logger::getLogger()->error("Sending JSON readings data " "- error: HTTP code |%d| - HostPort |%s| - path |%s| - OMF message |%s|", res, m_sender.getHostPort().c_str(), m_path.c_str(), omfData); delete[] omfData; return 0; } } catch (const std::exception& e) { string errorMsg = errorMessageHandler(e.what()); Logger::getLogger()->error("Sending JSON readings data " "- generic error: %s - %s %s", errorMsg.c_str(), m_sender.getHostPort().c_str(), m_path.c_str() ); delete[] omfData; return false; } delete[] omfData; // Return number of sent readings to the caller return 1; } /** * Creates a vector of HTTP header to be sent to Server * * @param type The message type ('Type', 'Container', 'Data') * @param action Action to execute, either "create" or "delete" * @return A vector of HTTP Header string pairs */ const vector<pair<string, string>> OMF::createMessageHeader(const std::string& type, const std::string& action) const { vector<pair<string, string>> res; res.push_back(pair<string, string>("messagetype", type)); res.push_back(pair<string, string>("producertoken", m_producerToken)); res.push_back(pair<string, string>("omfversion", m_OMFVersion)); res.push_back(pair<string, string>("messageformat", "JSON")); res.push_back(pair<string, string>("action", action)); return res; } /** * Creates the Type message for data type definition * * @param reading A reading data * @return Type JSON message as string */ const std::string OMF::createTypeData(const Reading& reading, OMFHints *hints) { // Build the Type data message (JSON Array) string tData="["; if (m_sendFullStructure) { // Add the Static data part tData.append("{ \"type\": \"object\", \"properties\": { "); for (auto it = m_staticData->cbegin(); it != m_staticData->cend(); ++it) { tData.append("\""); tData.append(ApplyPIServerNamingRulesObj(it->first.c_str(), nullptr) ); tData.append("\": {\"type\": \"string\"},"); } // Connector relay / ODS / EDS if (m_PIServerEndpoint == ENDPOINT_CR || 
m_PIServerEndpoint == ENDPOINT_OCS || m_PIServerEndpoint == ENDPOINT_ADH || m_PIServerEndpoint == ENDPOINT_EDS ) { tData.append("\"Name\": { \"type\": \"string\", \"isindex\": true } }, " "\"classification\": \"static\", \"id\": \""); } else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { tData.append("\"Name\": { \"type\": \"string\", \"isname\": true }, "); tData.append("\"AssetId\": { \"type\": \"string\", \"isindex\": true } "); tData.append(" }, \"classification\": \"static\", \"id\": \""); } // Add type_id + '_' + asset_name + '_typename_sensor' OMF::setAssetTypeTag(m_assetName, "typename_sensor", tData); tData.append("\" }, "); } // Add the Dynamic data part tData.append(" { \"type\": \"object\", \"properties\": {"); /* We add for each reading * the DataPoint name & type * type is 'integer' for INT * 'number' for FLOAT * 'string' for STRING */ bool ret = true; const vector<Datapoint*> data = reading.getReadingData(); /** * This loop creates: * "dataName": {"type": "dataType"}, */ for (vector<Datapoint*>::const_iterator it = data.begin(); it != data.end(); ++it) { string dpName = (*it)->getName(); if (dpName.compare(OMF_HINT) == 0) { // We never include OMF hints in the data we send to PI continue; } string omfType; if (!isTypeSupported( (*it)->getData())) { omfType = OMF_TYPE_UNSUPPORTED; } else { omfType = omfTypes[((*it)->getData()).getType()]; } string format = OMF::getFormatType(omfType); if (hints && (omfType == OMF_TYPE_FLOAT || omfType == OMF_TYPE_INTEGER)) { const vector<OMFHint *> omfHints = hints->getHints(dpName); for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { if (typeid(**it) == typeid(OMFNumberHint)) { format = (*it)->getHint(); break; } if (typeid(**it) == typeid(OMFIntegerHint)) { omfType = OMF_TYPE_INTEGER; format = (*it)->getHint(); break; } } } if (format.compare(OMF_TYPE_UNSUPPORTED) == 0) { //TO DO: ADD LOG ret = false; continue; } // Add datapoint Name tData.append("\"" + ApplyPIServerNamingRulesObj(dpName, nullptr) + 
"\""); tData.append(": {\"type\": \""); // Add datapoint Type tData.append(omfType); tData.append("\", \"name\": \""); tData.append(ApplyPIServerNamingRulesObj(dpName, nullptr) ); // Applies a format if it is defined if (! format.empty() ) { tData.append("\", \"format\": \""); tData.append(format); } tData.append("\"}, "); } // Add time field tData.append("\"Time\": {\"type\": \"string\", \"isindex\": true, \"format\": \"date-time\"}}, " "\"classification\": \"dynamic\", \"id\": \""); bool typeNameSet = false; if (hints) { const vector<OMFHint *> omfHints = hints->getHints(); for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { if (typeid(**it) == typeid(OMFTypeNameHint)) { Logger::getLogger()->debug("Using OMF TypeName hint: %s", (*it)->getHint().c_str()); tData.append((*it)->getHint()); typeNameSet = true; break; } } } if (!typeNameSet) { // Add type_id + '_' + asset_name + '__typename_measurement' OMF::setAssetTypeTag(m_assetName, "typename_measurement", tData); } tData.append("\" }]"); // Check we have to return empty data or not if (!ret && data.size() == 1) { // TODO: ADD LOGGING return string(""); } else { // Return JSON string return tData; } } /** * Creates the Container message for data type definition * * @param reading A reading data * @return Type JSON message as string */ const std::string OMF::createContainerData(const Reading& reading, OMFHints *hints) { string assetName = m_assetName; string measurementId; // Build the Container data (JSON Array) string cData = "[{\"typeid\": \""; string typeName = ""; if (hints) { const std::vector<OMFHint *> omfHints = hints->getHints(); for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { if (typeid(**it) == typeid(OMFTypeNameHint)) { typeName = (*it)->getHint(); Logger::getLogger()->debug("Using OMF TypeName hint: %s", typeName.c_str()); } } } if (typeName.length()) { cData.append(typeName); } else { // Add type_id + '_' + asset_name + '__typename_measurement' 
OMF::setAssetTypeTag(assetName, "typename_measurement", cData); } measurementId = generateMeasurementId(assetName); // Apply any TagName hints to modify the containerid if (hints) { const std::vector<OMFHint *> omfHints = hints->getHints(); for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { if (typeid(**it) == typeid(OMFTagNameHint)) { measurementId = (*it)->getHint(); Logger::getLogger()->debug("Using OMF TagName hint: %s", measurementId.c_str()); break; } } } cData.append("\", \"id\": \"" + measurementId); cData.append("\"}]"); // Return JSON string return cData; } /** * Generate the container id for the given asset * * @param assetName Asset for which the container id should be generated * @return Container id for the requested asset */ std::string OMF::generateMeasurementId(const string& assetName) { std::string measurementId; string AFHierarchyPrefix; string AFHierarchyLevel; long namingScheme; long typeId; typeId = OMF::getAssetTypeId(assetName); namingScheme = getNamingScheme(assetName); if (namingScheme == NAMINGSCHEME_COMPATIBILITY || namingScheme == NAMINGSCHEME_HASH) { measurementId = to_string(typeId) + "measurement_" + assetName; // Add the 1st level of AFHierarchy as a prefix to the name in case of PI Web API if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { retrieveAFHierarchyPrefixAssetName(assetName, AFHierarchyPrefix, AFHierarchyLevel); measurementId = AFHierarchyPrefix + "_" + measurementId; } } else { if (typeId > 1) { measurementId = to_string(typeId) + "measurement_" + assetName; } else { measurementId = assetName; } } Logger::getLogger()->debug("%s - namingScheme default :%ld: namingScheme applied :%ld: assetName :%s: typeId :%ld: measurementId :%s:", __FUNCTION__, m_NamingScheme, namingScheme, assetName.c_str(), typeId, measurementId.c_str() ); return(measurementId); } /** * Generate a suffix for the given asset in relation to the selected naming schema and the value of the type id * * @param assetName Asset for which the suffix 
should be generated
 * @param typeId	Type id of the asset
 * @return		Suffix to be used for the given asset
 */
std::string OMF::generateSuffixType(string &assetName, long typeId)
{
	std::string suffix;
	long namingScheme;

	namingScheme = getNamingScheme(assetName);

	// Compatibility and suffix schemes always carry the type id suffix;
	// other schemes only add it once the type id has been incremented past 1
	if (namingScheme == NAMINGSCHEME_COMPATIBILITY ||
	    namingScheme == NAMINGSCHEME_SUFFIX)
	{
		suffix = AF_TYPES_SUFFIX + to_string(typeId);
	}
	else
	{
		if (typeId > 1)
		{
			suffix = AF_TYPES_SUFFIX + to_string(typeId);
		}
	}

	Logger::getLogger()->debug("%s - namingScheme default :%ld: namingScheme applied :%ld: typeId :%ld: suffix :%s:", __FUNCTION__, m_NamingScheme, namingScheme, typeId, suffix.c_str());

	return(suffix);
}

/**
 * Creates the Static Data message for data type definition
 *
 * Note: type is 'Data'
 *
 * @param reading	A reading data
 * @return		Type JSON message as string
 */
const std::string OMF::createStaticData(const Reading& reading)
{
	string assetName;

	// Build the Static data (JSON Array)
	string sData = "[";

	sData.append("{\"typeid\": \"");

	assetName = m_assetName;
	long typeId = getAssetTypeId(assetName);

	// Add type_id + '_' + asset_name + '_typename_sensor'
	OMF::setAssetTypeTag(assetName, "typename_sensor", sData);

	sData.append("\", \"values\": [{");

	// Emit each configured static data item as "name": "value"
	for (auto it = m_staticData->cbegin(); it != m_staticData->cend(); ++it)
	{
		sData.append("\"");
		sData.append(ApplyPIServerNamingRulesObj(it->first.c_str(), nullptr) );
		sData.append("\": \"");
		sData.append(it->second.c_str());
		sData.append("\", ");
	}
	sData.append(" \"Name\": \"");

	// Add asset_name
	// Connector relay / ODS / EDS
	if (m_PIServerEndpoint == ENDPOINT_CR)
	{
		sData.append(assetName);
	}
	else if (m_PIServerEndpoint == ENDPOINT_OCS ||
		 m_PIServerEndpoint == ENDPOINT_ADH ||
		 m_PIServerEndpoint == ENDPOINT_EDS)
	{
		sData.append(assetName);
	}
	else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API)
	{
		// PI Web API also needs an AssetId built from the AF hierarchy prefix
		string AFHierarchyPrefix;
		string AFHierarchyLevel;

		retrieveAFHierarchyPrefixAssetName(assetName, AFHierarchyPrefix, AFHierarchyLevel);

		sData.append(assetName + generateSuffixType(assetName, typeId));
		sData.append("\", \"AssetId\": \"");
		sData.append("A_" + AFHierarchyPrefix + "_" + assetName + generateSuffixType(assetName, typeId) );
	}

	sData.append("\"}]}]");

	// Return JSON string
	return sData;
}

/**
 * Creates the Link Data message for data type definition
 *
 * Note: type is 'Data'
 *
 * @param reading		A reading data
 * @param AFHierarchyLevel	The AF element we are placing the reading in
 * @param AFHierarchyPrefix	The prefix we use for the AF Element
 * @param objectPrefix		The object prefix we are using for this asset
 * @param hints			OMF hints; Tag/TagName hints rename the target asset
 * @param legacy		We are using legacy, complex types for this reading
 * @return			Type JSON message as string
 */
std::string OMF::createLinkData(const Reading& reading,  std::string& AFHierarchyLevel, std::string&  AFHierarchyPrefix, std::string&  objectPrefix, OMFHints *hints, bool legacy)
{
	string targetTypeId;
	string measurementId;

	string assetName = m_assetName;

	// Build the Link data (JSON Array)
	long typeId = getAssetTypeId(assetName);

	string lData = "{\"typeid\": \"__Link\", \"values\": [";

	// Handles the structure for the Connector Relay
	// not supported by PI Web API
	// Connector relay / ADH / ODS / EDS
	if (m_PIServerEndpoint == ENDPOINT_CR  ||
	    m_PIServerEndpoint == ENDPOINT_ADH ||
	    m_PIServerEndpoint == ENDPOINT_OCS ||
	    m_PIServerEndpoint == ENDPOINT_EDS
		)
	{
		lData.append("{\"source\": {\"typeid\": \"");

		// Add type_id + '_' + asset_name + '__typename_sensor'
		OMF::setAssetTypeTag(assetName, "typename_sensor", lData);

		lData.append("\", \"index\": \"_ROOT\"},");
		lData.append("\"target\": {\"typeid\": \"");

		// Add type_id + '_' + asset_name + '__typename_sensor'
		OMF::setAssetTypeTag(assetName, "typename_sensor", lData);

		lData.append("\", \"index\": \"");

		// Add asset_name
		lData.append(assetName);

		lData.append("\"}}");
	}
	else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API)
	{
		// Link the asset to the 1st level of AF hierarchy if the end point is PI Web API
		string tmpStr = AF_HIERARCHY_1LEVEL_LINK;

		OMF::setAssetTypeTag(assetName, "typename_sensor", targetTypeId);

		StringReplace(tmpStr, "_placeholder_src_type_", AFHierarchyPrefix + "_" + AFHierarchyLevel + "_typeid");
		StringReplace(tmpStr, "_placeholder_src_idx_",  AFHierarchyPrefix + "_" + AFHierarchyLevel );
		if (legacy)
		{
			StringReplace(tmpStr, "_placeholder_tgt_type_", targetTypeId);
			StringReplace(tmpStr, "_placeholder_tgt_idx_", "A_" + objectPrefix + "_" + assetName + generateSuffixType(assetName, typeId) );
		}
		else
		{
			// Get the new asset name after hints are applied for the linked data messages
			string newAssetName = assetName;
			if (hints)
			{
				const std::vector<OMFHint *> omfHints = hints->getHints();
				for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++)
				{
					if (typeid(**it) == typeid(OMFTagNameHint))
					{
						string hintValue = (*it)->getHint();
						Logger::getLogger()->debug("Using OMF TagName hint: %s for asset %s", hintValue.c_str(), assetName.c_str());
						newAssetName = hintValue;
					}
					if (typeid(**it) == typeid(OMFTagHint))
					{
						string hintValue = (*it)->getHint();
						Logger::getLogger()->debug("Using OMF Tag hint: %s for asset %s", hintValue.c_str(), assetName.c_str());
						newAssetName = hintValue;
					}
				}
			}
			StringReplace(tmpStr, "_placeholder_tgt_type_", "FledgeAsset");
			StringReplace(tmpStr, "_placeholder_tgt_idx_", newAssetName);
		}

		lData.append(tmpStr);
	}

	if (legacy)
	{
		// Legacy, complex types also need a link from the static element
		// to the container holding the readings
		lData.append(",{\"source\": {\"typeid\": \"");

		// Add type_id + '_' + asset_name + '__typename_sensor'
		OMF::setAssetTypeTag(assetName, "typename_sensor", lData);

		lData.append("\", \"index\": \"");

		if (m_PIServerEndpoint == ENDPOINT_CR)
		{
			// Add asset_name
			lData.append(assetName);
		}
		else if (m_PIServerEndpoint == ENDPOINT_OCS ||
			 m_PIServerEndpoint == ENDPOINT_ADH ||
			 m_PIServerEndpoint == ENDPOINT_EDS)
		{
			// Add asset_name
			lData.append(assetName);
		}
		else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API)
		{
			lData.append("A_" + objectPrefix + "_" + assetName + generateSuffixType(assetName, typeId) );
		}

		measurementId = generateMeasurementId(assetName);

		// Apply any TagName hints to modify the containerid
		if (hints)
		{
			const std::vector<OMFHint *> omfHints = hints->getHints();
			for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++)
			{
				if (typeid(**it) == typeid(OMFTagNameHint))
				{
					measurementId = (*it)->getHint();
					Logger::getLogger()->debug("Using OMF TagName hint: %s", measurementId.c_str());
					break;
				}
			}
		}

		lData.append("\"}, \"target\": {\"containerid\": \"" + measurementId);

		lData.append("\"}}");
	}

	lData.append("]}");

	// Return JSON string
	return lData;
}

/**
 * Calculate the prefix to be used for AF objects and the last level of the hierarchies
 * from a given AF path
 *
 * @param path			Path to evaluate
 * @param out/prefix		Calculated prefix
 * @param out/AFHierarchyLevel	last level of the hierarchies evaluated form the path
 */
void OMF::generateAFHierarchyPrefixLevel(string& path, string& prefix, string& AFHierarchyLevel)
{
	string pathFixed;

	AFHierarchyLevel = extractLastLevel(path, AFHierarchySeparator);

	pathFixed = StringSlashFix(path);
	prefix = generateUniquePrefixId(pathFixed);
}

/**
 * Retrieve from the map the prefix and the last level of the hierarchy from a given assetname
 *
 * @param assetName		assetName to evaluate
 * @param out/prefix		Calculated prefix
 * @param out/AFHierarchyLevel	Last level of the hierarchy
 */
void OMF::retrieveAFHierarchyPrefixAssetName(const string& assetName, string& prefix, string& AFHierarchyLevel)
{
	string AFHierarchy;
	string prefixTmp;

	// Metadata Rules - Exist
	// NOTE: only the first stored (path, prefix) tuple for the asset is used
	auto rule = m_AssetNamePrefix.find(assetName);
	if (rule != m_AssetNamePrefix.end())
	{
		AFHierarchy = std::get<0>(rule->second[0]);
		generateAFHierarchyPrefixLevel(AFHierarchy, prefixTmp, AFHierarchyLevel);

		prefix =std::get<1>(rule->second[0]);
	}
}

/**
 * Retrieve from the map the prefix and the hierarchy name from a given assetname
 *
 * @param assetName		assetName to evaluate
 * @param out/prefix		Calculated prefix
 * @param out/AFHierarchy	hierarchy name
 */
void OMF::retrieveAFHierarchyFullPrefixAssetName(const string& assetName, string& prefix, string& AFHierarchy)
{
	string path;

	// Metadata Rules - Exist
	auto rule = m_AssetNamePrefix.find(assetName);
	if (rule != m_AssetNamePrefix.end())
	{
		AFHierarchy = std::get<0>(rule->second[0]);
		prefix =std::get<1>(rule->second[0]);
	}
}

/**
 * Handle the OMF hint AFLocation to define a position of the asset into the AF hierarchy
 *
 * @param assetName		AssetName to handle
 * @param OmfHintHierarchy	Position of the asset into the AF hierarchy
 *
 * @return			True if set asset will have a defined AF hierarchy position
 */
bool OMF::createAFHierarchyOmfHint(const string& assetName, const string &OmfHintHierarchy)
{
	string pathNew;
	string prefix;
	string AFHierarchyLevel;
	string prefixStored;
	string pathStored;
	bool ruleMatched = false;

	if (! OmfHintHierarchy.empty())
	{
		pathNew = OmfHintHierarchy;

		if (pathNew.at(0) != '/')
		{
			// relative path
			pathNew = "/" + pathNew;
		}

		generateAFHierarchyPrefixLevel(pathNew, prefix, AFHierarchyLevel);
		ruleMatched = true;

		prefixStored = getHashStored (assetName);
		pathStored = getPathStored (assetName);

		Logger::getLogger()->debug("%s - OMF hint hierarchy - assetName :%s: path :%s: pathStored :%s: prefixStored :%s: "
			, __FUNCTION__
			, assetName.c_str()
			, pathNew.c_str()
			, pathStored.c_str()
			, prefixStored.c_str() );

		// Create the AF hierarchy only once per distinct path
		if (find(m_afhHierarchyAlreadyCreated.begin(), m_afhHierarchyAlreadyCreated.end(), pathNew) == m_afhHierarchyAlreadyCreated.end()){

			Logger::getLogger()->debug("%s - New path requested :%s:", __FUNCTION__, pathNew.c_str());
			if (!sendAFHierarchy(pathNew.c_str()))
			{
				return false;
			}
		}

		if (pathStored.compare("") == 0)
		{
			// First time this asset is positioned: just record the mapping
			Logger::getLogger()->debug("%s - New path for the assetName :%s: path :%s:", __FUNCTION__, assetName.c_str(), pathNew.c_str());

			auto item = make_pair(pathNew, prefix);
			m_AssetNamePrefix[assetName].push_back(item);
		}
		else
		{
			if (pathNew.compare(pathStored) != 0)
			{
				// The hint moved the asset: delete it from the old AF
				// position and recreate it in the new one
				Logger::getLogger()->debug("%s - path changed for the assetName :%s: path :%s: previous path :%s:"
					, __FUNCTION__
					, assetName.c_str()
					, pathNew.c_str()
					, pathStored.c_str());

				if (!deleteAssetAFH(assetName, pathStored))
				{
					return false;
				}

				auto item = make_pair(pathNew, prefix);
				m_AssetNamePrefix[assetName].clear();
				m_AssetNamePrefix[assetName].push_back(item);

				setPathStored (assetName, pathNew);
				if (!createAssetAFH(assetName, pathNew))
				{
					return false;
				}
			}
			else
			{
				Logger::getLogger()->debug("%s - Same path for the assetName :%s: path :%s:", __FUNCTION__, assetName.c_str(), pathNew.c_str());
			}
		}
	}

	// For debug
	// Logger::getLogger()->debug("%s - Hierarchy asset start", __FUNCTION__);
	// for (auto item=m_AssetNamePrefix.begin(); item!=m_AssetNamePrefix.end(); ++item)
	// {
	// 	auto v = item->second;
	//
	// 	for(auto arrayItem : v) {
	//
	// 		Logger::getLogger()->debug("%s - Hierarchy asset :%s: hash :%s: path :%s:", __FUNCTION__, item->first.c_str(), arrayItem.first.c_str(), arrayItem.second.c_str());
	// 	}
	//
	// }

	return (ruleMatched);
}

/**
 * Extracts a variable and its elements from a string, the variable will have the shape ${room:unknown}
 *
 * @param strToHandle	Source string from which the variable should be extracted
 * @param variable	Variable found in the form ${room:unknown}
 * @param value		Value of the variable, left part , room in this case ${room:unknown}
 * @param defaultValue	Default value of the variable, right part , unknown in this case ${room:unknown}
 * @return		True a variable is found in the source string
 */
bool OMF::extractVariable(string &strToHandle, string &variable, string &value, string &defaultValue)
{
	bool found;
	size_t pos1, pos2, pos3;

	found = false;
	variable ="";
	value ="";
	defaultValue ="";

	pos1 = strToHandle.find("${");
	if (pos1 !=std::string::npos)
	{
		pos3 = strToHandle.find("}", pos1);
		pos2 = strToHandle.find(":", pos1);

		// A ':' before the closing '}' means a default value is present
		if ( (pos2 != std::string::npos) && (pos2 < pos3) )
		{
			value = strToHandle.substr(pos1 + 2, (pos2 - (pos1 + 2) ) );

			pos3 = strToHandle.find("}", pos2);
			if (pos3 != std::string::npos)
			{
				found = true;
				defaultValue = strToHandle.substr(pos2 + 1, (pos3 - (pos2 + 1)));
				variable = strToHandle.substr(pos1, pos3 - pos1 + 1);
			}
		}
		else
		{
			Logger::getLogger()->debug("OMF hierarchy hints doesn't have the default value in the metadata reference :%s:", strToHandle.c_str());

			// No default value provided
			if (pos3 != std::string::npos)
			{
				found = true;
				value = strToHandle.substr(pos1 + 2, (pos3 - (pos1 + 2) ) );
				variable = strToHandle.substr(pos1, pos3 - pos1 + 1);
			}
		}
	}

	return(found);
}

/**
 * Evaluate the AF hierarchy provided and expand the variables in the form ${room:unknown}
 *
 * @param reading	Asset reading that should be considered from which to extract the metadata values
 * @param AFHierarchy	AF hierarchy containing the variable to be expanded
 *
 * @return		The AF hierarchy with all the variables expanded
 */
std::string OMF::variableValueHandle(const Reading& reading, std::string &AFHierarchy)
{
	string AFHierarchyNew;
	string propertyToSearch;
	string propertyValue;
	string propertyDefault;
	string variableValue;
	string propertyName;
	bool found;
	bool foundProperty;

	found = false;
	AFHierarchyNew = AFHierarchy;

	if (AFHierarchyNew.find("${") !=std::string::npos)
	{
		// Expand every ${name:default} in turn; each datapoint of the
		// reading is searched for a matching name
		while (extractVariable(AFHierarchyNew, variableValue , propertyToSearch, propertyDefault))
		{
			auto values = reading.getReadingData();
			foundProperty = false;
			for (auto it = values.begin(); it != values.end(); it++)
			{
				propertyName = (*it)->getName();
				if (propertyName.compare(propertyToSearch) == 0)
				{
					DatapointValue data = (*it)->getData();
					propertyValue = data.toString();
					found = true;
					foundProperty = true;
				}
			}
			if (foundProperty)
			{
				// Substitute the datapoint value, stripped of quotes
				StringReplaceAll(propertyValue, "\"", "");
				StringReplace(AFHierarchyNew, variableValue, propertyValue);
			}
			else
			{
				// Datapoint not present: fall back to the default value
				StringReplaceAll(propertyValue, "\"", "");
				StringReplace(AFHierarchyNew, variableValue, propertyDefault);
			}
		}
	}
	StringReplaceAll(AFHierarchyNew, "//", "/");

	Logger::getLogger()->debug("%s - Variables found :%s: AFHierarchy :%s: AFHierarchyNew :%s: variableValue :%s: propertyToSearch :%s: propertyValue :%s: propertyDefault :%s:"
		,__FUNCTION__
		,found ?
"true" : "false" ,AFHierarchy.c_str() ,AFHierarchyNew.c_str() ,variableValue.c_str() ,propertyToSearch.c_str() ,propertyValue.c_str() ,propertyDefault.c_str() ); return (AFHierarchyNew); } /** * Evaluated the maps containing the Named and Metadata rules to fill the map m_AssetNamePrefix * containing for each asset name the related prefix and hierarchy name * * @param path assetName to evaluate * @param reading reading row from which will be extracted the datapoint for the evaluation of the rules */ bool OMF::evaluateAFHierarchyRules(const string& assetName, const Reading& reading) { bool ruleMatched = false; bool ruleMatchedNames = false; bool success = true; string pathInitial; string path; bool changed; // names rules - Check if there are any rules defined or not if (!m_AFMapEmptyNames) { if (! m_NamesRules.empty()) { string pathNamingRules; string prefix; string AFHierarchyLevel; auto it = m_NamesRules.find(assetName); if (it != m_NamesRules.end()) { pathInitial = it->second; if (pathInitial.at(0) != '/') { // relative path pathInitial = "/" + m_DefaultAFLocation + "/" + pathInitial; } path = variableValueHandle(reading, pathInitial); path = ApplyPIServerNamingRulesPath(path, &changed); if (pathInitial.compare(path) != 0) { it->second = path; } generateAFHierarchyPrefixLevel(path, prefix, AFHierarchyLevel); ruleMatched = true; ruleMatchedNames = true; auto v = m_AssetNamePrefix[assetName]; auto item = make_pair(path, prefix); if(v.size() == 0 || std::find(v.begin(), v.end(), item) == v.end()) { if (success = sendAFHierarchy(path.c_str())) { m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug( "%s - m_NamesRules size :%d: m_AssetNamePrefix size :%d: vector size :%d: pathInitial :%s: path :%s: stored :%s:" ,__FUNCTION__ ,m_NamesRules.size() ,m_AssetNamePrefix.size() ,v.size() ,pathInitial.c_str() ,path.c_str() ,it->second.c_str()); } else { return false; } } else { Logger::getLogger()->debug( "%s - m_NamesRules skipped pathInitial :%s: path 
:%s: stored :%s:" ,__FUNCTION__ ,pathInitial.c_str() ,path.c_str() ,it->second.c_str()); } } } } // Meta rules - Check if there are any rules defined or not if (!m_AFMapEmptyMetadata) { auto values = reading.getReadingData(); // Metadata Rules - Exist if (! m_MetadataRulesExist.empty() ) { string propertyName; string prefix; string AFHierarchyLevel; for (auto it = values.begin(); it != values.end(); it++) { propertyName = (*it)->getName(); auto rule = m_MetadataRulesExist.find(propertyName); if (rule != m_MetadataRulesExist.end()) { pathInitial = rule->second;; if (pathInitial.at(0) != '/') { pathInitial = "/" + m_DefaultAFLocation + "/" + pathInitial; } path = variableValueHandle(reading, pathInitial); path = ApplyPIServerNamingRulesPath(path, &changed); generateAFHierarchyPrefixLevel(path, prefix, AFHierarchyLevel); ruleMatched = true; auto v = m_AssetNamePrefix[assetName]; auto item = make_pair(path, prefix); if(v.size() == 0 || std::find(v.begin(), v.end(), item) == v.end()) { if (success = sendAFHierarchy(path.c_str())) { m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("%s - m_MetadataRulesExist asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } else { return false; } } else { Logger::getLogger()->debug("%s - m_MetadataRulesExist already created asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } } } } // Metadata Rules - NonExist if (! 
m_MetadataRulesNonExist.empty() ) { string propertyName; string prefix; string AFHierarchyLevel; bool found; string rule; for (auto it = m_MetadataRulesNonExist.begin(); it != m_MetadataRulesNonExist.end(); it++) { found = false; rule = it->first; pathInitial = it->second; for (auto itL2 = values.begin(); found == false && itL2 != values.end(); itL2++) { propertyName = (*itL2)->getName(); if (propertyName.compare(rule) == 0) { found = true; } } if (!found) { if (pathInitial.at(0) != '/') { // relative path pathInitial = "/" +m_DefaultAFLocation + "/" + pathInitial; } path = variableValueHandle(reading, pathInitial); path = ApplyPIServerNamingRulesPath(path, &changed); generateAFHierarchyPrefixLevel(path, prefix, AFHierarchyLevel); ruleMatched = true; auto v = m_AssetNamePrefix[assetName]; auto item = make_pair(path, prefix); if(v.size() == 0 || std::find(v.begin(), v.end(), item) == v.end()) { if (success = sendAFHierarchy(path.c_str())) { m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("%s - m_MetadataRulesNonExist - asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } else { return false; } } else { Logger::getLogger()->debug("%s - m_MetadataRulesNonExist - already created asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } } } } // Metadata Rules - equal if ( ! 
m_MetadataRulesEqual.empty() ) { string propertyName; string prefix; string AFHierarchyLevel; bool found; string rule; for (auto it = m_MetadataRulesEqual.begin(); it != m_MetadataRulesEqual.end(); it++) { found = false; rule = it->first; for (auto itL2 = values.begin(); found == false && itL2 != values.end(); itL2++) { propertyName = (*itL2)->getName(); DatapointValue data = (*itL2)->getData(); string dataValue = data.toString(); StringStripQuotes(dataValue); if (propertyName.compare(rule) == 0) { for (auto itL3 = it->second.begin(); found == false && itL3 != it->second.end(); itL3++) { string value = itL3->first; StringStripQuotes(value); pathInitial = itL3->second; if (value.compare(dataValue) == 0) { found = true; } } } } if (found) { if (pathInitial.at(0) != '/') { // relative path pathInitial = "/" + m_DefaultAFLocation + "/" + pathInitial; } path = variableValueHandle(reading, pathInitial); path = ApplyPIServerNamingRulesPath(path, &changed); generateAFHierarchyPrefixLevel(path, prefix, AFHierarchyLevel); ruleMatched = true; auto v = m_AssetNamePrefix[assetName]; auto item = make_pair(path, prefix); if(v.size() == 0 || std::find(v.begin(), v.end(), item) == v.end()) { if (success = sendAFHierarchy(path.c_str())) { m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("%s - m_MetadataRulesEqual asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } else { return false; } } else { Logger::getLogger()->debug("%s - m_MetadataRulesEqual already created asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } } } } // Metadata Rules - Not equal if ( ! 
m_MetadataRulesNotEqual.empty() ) { string propertyName; string prefix; string AFHierarchyLevel; string rule; bool NotEqual; for (auto it = m_MetadataRulesNotEqual.begin(); it != m_MetadataRulesNotEqual.end(); it++) { NotEqual = false; rule = it->first; for (auto itL2 = values.begin(); NotEqual == false && itL2 != values.end(); itL2++) { propertyName = (*itL2)->getName(); if (propertyName.compare(rule) == 0) { DatapointValue data = (*itL2)->getData(); string dataValue = data.toString(); StringStripQuotes(dataValue); for (auto itL3 = it->second.begin(); NotEqual == false && itL3 != it->second.end(); itL3++) { string value = itL3->first; pathInitial = itL3->second; StringStripQuotes(value); if (value.compare(dataValue) != 0) { NotEqual = true; } } } } if (NotEqual) { if (pathInitial.at(0) != '/') { // relative path pathInitial = "/" + m_DefaultAFLocation + "/" + pathInitial; } path = variableValueHandle(reading, pathInitial); path = ApplyPIServerNamingRulesPath(path, &changed); generateAFHierarchyPrefixLevel(path, prefix, AFHierarchyLevel); ruleMatched = true; auto v = m_AssetNamePrefix[assetName]; auto item = make_pair(path, prefix); if(v.size() == 0 || std::find(v.begin(), v.end(), item) == v.end()) { if (success = sendAFHierarchy(path.c_str())) { m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("%s - m_MetadataRulesNotEqual asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } else { return false; } } else { Logger::getLogger()->debug("%s - m_MetadataRulesNotEqual already created asset :%s: path added :%s: :%s:" , __FUNCTION__, assetName.c_str(), pathInitial.c_str() , path.c_str() ); } } } } } // If no rules matched the AF default location if ( ! 
ruleMatched ) { string prefix; string AFHierarchyLevel; generateAFHierarchyPrefixLevel(m_DefaultAFLocation, prefix, AFHierarchyLevel); auto item = make_pair(m_DefaultAFLocation, prefix); auto & curr_vec = m_AssetNamePrefix[assetName]; // Insert new item into m_AssetNamePrefix[assetName] vector, if it doesn't exists already if (std::find(curr_vec.begin(), curr_vec.end(), item) == curr_vec.end()) { m_AssetNamePrefix[assetName].push_back(item); Logger::getLogger()->debug("m_AssetNamePrefix.size()=%d; m_AssetNamePrefix[assetName].size()=%d, added m_AssetNamePrefix[%s]=(%s,%s)", m_AssetNamePrefix.size(), m_AssetNamePrefix[assetName].size(), assetName.c_str(), m_DefaultAFLocation.c_str(), prefix.c_str()); } } return success; } /** * Set the tag ID_XYZ_typename_sensor|typename_measurement * * @param assetName The assetName * @param tagName The tagName to append * @param data The string to append result tag */ void OMF::setAssetTypeTag(const string& assetName, const string& tagName, string& data) { string AFHierarchyPrefix; string AFHierarchyLevel; string keyComplete; // Add the 1st level of AFHierarchy as a prefix to the name in case of PI Web API if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { retrieveAFHierarchyPrefixAssetName (assetName, AFHierarchyPrefix, AFHierarchyLevel); keyComplete = AFHierarchyPrefix + "_" + assetName; } else { keyComplete = assetName; } string AssetTypeTag = to_string(this->getAssetTypeId(assetName)) + "_" + assetName + "_" + tagName; // Add the 1st level of AFHierarchy as a prefix to the name in case of PI Web API if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { AssetTypeTag = "A_" + AFHierarchyPrefix + "_" + AFHierarchyLevel + "_" + AssetTypeTag; } // Add type-id + '_' + asset_name + '_' + tagName' data.append(AssetTypeTag); } /** * Set the tag ID_XYZ_typename_sensor|typename_measurement using the path in which the asset was created * * @param assetName The assetName * @param tagName The tagName to append * @param data The string to append 
result tag */ void OMF::setAssetTypeTagNew(const string& assetName, const string& tagName, string& data) { string path; string assetPrefix; string AFHierarchyLevel; string keyComplete; // Add the 1st level of AFHierarchy as a prefix to the name in case of PI Web API if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { path=getPathOrigStored (assetName); generateAFHierarchyPrefixLevel(path, assetPrefix, AFHierarchyLevel); keyComplete = assetPrefix + "_" + assetName; } else { keyComplete = assetName; } string AssetTypeTag = to_string(this->getAssetTypeId(assetName)) + "_" + assetName + "_" + tagName; // Add the 1st level of AFHierarchy as a prefix to the name in case of PI Web API if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { AssetTypeTag = "A_" + assetPrefix + "_" + AFHierarchyLevel + "_" + AssetTypeTag; } // Add type-id + '_' + asset_name + '_' + tagName' data.append(AssetTypeTag); } /** * Handles the OMF data types for the current Reading row * DataTypes are created and sent only once per assetName + typeId * if skipSending is true * * @param row The current Reading row with data * @param skipSending Send once or always the data types * @return True if data types have been sent or already sent. * False if the sending has failed. */ bool OMF::handleDataTypes(const string keyComplete, const Reading& row, bool skipSending, OMFHints *hints) { // Create the key for dataTypes sending once const string key(skipSending ? (keyComplete) : ""); // Check whether to create and send Data Types bool sendTypes = (skipSending == true) ? // Send if not already sent !OMF::getCreatedTypes(key, row, hints) : // Always send types true; // Handle the data types of the current reading if (sendTypes && !OMF::sendDataTypes(row, hints)) { // Failure return false; } // We have sent types, we might save this. 
if (skipSending && sendTypes) { // Save datatypes key OMF::setCreatedTypes(row, hints); } // Success return true; } /** * Get from m_formatTypes map the key (OMF type + OMF format) * * @param key The OMF type for which the format is requested * @return The defined OMF format for the requested type * */ std::string OMF::getFormatType(const string &key) const { string value; try { auto pos = m_formatTypes.find(key); value = pos->second; } catch (const std::exception &e) { Logger::getLogger()->error("Unable to find the OMF format for the type :" + key + ": - error: %s", e.what()); } return value; } /** * Add the key (OMF type + OMF format) into a map * * @param key The OMF type, key of the map * @param value The OMF format to set for the specific OMF type * */ void OMF::setFormatType(const string &key, string &value) { m_formatTypes[key] = value; } /** * Set which PIServer component should be used for the communication */ void OMF::setPIServerEndpoint(const OMF_ENDPOINT PIServerEndpoint) { m_PIServerEndpoint = PIServerEndpoint; } /** * Set the first level of hierarchy in Asset Framework in which the assets will be created, PI Web API only. */ void OMF::setDefaultAFLocation(const string &DefaultAFLocation) { m_DefaultAFLocation = StringSlashFix(DefaultAFLocation); } /** * Set the rules to address where assets should be placed in the AF hierarchy. 
* Decodes the JSON and assign to the structures the values about the Names rules * */ bool OMF::HandleAFMapNames(Document& JSon) { bool success = true; string name; string value; m_NamesRules.clear(); Value &JsonNames = JSon["names"]; for (Value::ConstMemberIterator itr = JsonNames.MemberBegin(); itr != JsonNames.MemberEnd(); ++itr) { name = itr->name.GetString(); value = itr->value.GetString(); if (m_NamesRules.find(name) == m_NamesRules.end()) { Logger::getLogger()->debug("%s - m_NamesRules size :%d: Exist name :%s: value :%s:" ,__FUNCTION__ ,m_NamesRules.size() ,name.c_str() ,value.c_str()); auto newMapValue = make_pair(name, value); m_NamesRules.insert(newMapValue); m_AFMapEmptyNames = false; } else { Logger::getLogger()->debug("%s - skipped m_NamesRules size :%d: Exist name :%s: value :%s:" ,__FUNCTION__ ,m_NamesRules.size() ,name.c_str() ,value.c_str()); } } return success; } /** * Set the rules to address where assets should be placed in the AF hierarchy. * Decodes the JSON and assign to the structures the values about the Metadata rules * */ bool OMF::HandleAFMapMetedata(Document& JSon) { bool success = true; string name; string value; string variable, variableValue, variableDefault; Value &JsonMetadata = JSon["metadata"]; // --- Handling Exist section if (JsonMetadata.HasMember("exist")) { Value &JSonExist = JsonMetadata["exist"]; for (Value::ConstMemberIterator itr = JSonExist.MemberBegin(); itr != JSonExist.MemberEnd(); ++itr) { bool changed = false; name = itr->name.GetString(); value = itr->value.GetString(); Logger::getLogger()->debug("%s - m_MetadataRulesExist :%s: value :%s:", __FUNCTION__, name.c_str(), value.c_str()); auto newMapValue = make_pair(name,value); m_MetadataRulesExist.insert (newMapValue); extractVariable(value, variable, variableValue, variableDefault); if (! 
variableDefault.empty()) { m_MetadataRulesNonExist.insert (newMapValue); m_AFMapEmptyMetadata = false; } m_AFMapEmptyMetadata = false; } } // --- Handling Non Exist section if (JsonMetadata.HasMember("nonexist")) { Value &JSonNonExist = JsonMetadata["nonexist"]; for (Value::ConstMemberIterator itr = JSonNonExist.MemberBegin(); itr != JSonNonExist.MemberEnd(); ++itr) { name = itr->name.GetString(); value = itr->value.GetString(); Logger::getLogger()->debug("%s - m_MetadataRulesNonExist :%s: value :%s:", __FUNCTION__, name.c_str(), value.c_str()); auto newMapValue = make_pair(name,value); m_MetadataRulesNonExist.insert (newMapValue); m_AFMapEmptyMetadata = false; } } // --- Handling Equal section if (JsonMetadata.HasMember("equal")) { string property; string value; string path; Value &JSonEqual = JsonMetadata["equal"]; for (Value::ConstMemberIterator itr = JSonEqual.MemberBegin(); itr != JSonEqual.MemberEnd(); ++itr) { property = itr->name.GetString(); for (Value::ConstMemberIterator itrL2 = itr->value.MemberBegin(); itrL2 != itr->value.MemberEnd(); ++itrL2) { value = itrL2->name.GetString(); path = itrL2->value.GetString(); Logger::getLogger()->debug("%s - m_MetadataRulesEqual :%s: name :%s: value :%s:", __FUNCTION__, property.c_str() , value.c_str(), path.c_str()); auto item = make_pair(value,path); m_MetadataRulesEqual[property].push_back(item); m_AFMapEmptyMetadata = false; } } } // --- Handling Not Equal section if (JsonMetadata.HasMember("notequal")) { string property; string value; string path; Value &JSonEqual = JsonMetadata["notequal"]; for (Value::ConstMemberIterator itr = JSonEqual.MemberBegin(); itr != JSonEqual.MemberEnd(); ++itr) { property = itr->name.GetString(); for (Value::ConstMemberIterator itrL2 = itr->value.MemberBegin(); itrL2 != itr->value.MemberEnd(); ++itrL2) { value = itrL2->name.GetString(); path = itrL2->value.GetString(); Logger::getLogger()->debug("%s - m_MetadataRulesNotEqual :%s: name :%s: value :%s:", __FUNCTION__, property.c_str() , 
value.c_str(), path.c_str()); auto item = make_pair(value,path); m_MetadataRulesNotEqual[property].push_back(item); m_AFMapEmptyMetadata = false; } } } return success; } /** * Set the Names and Metadata rules to address where assets should be placed in the AF hierarchy. * */ bool OMF::setAFMap(const string &AFMap) { bool success = true; Document JSon; m_AFMapEmptyNames = true; m_AFMapEmptyMetadata = true; m_AFMap = AFMap; ParseResult ok = JSon.Parse(m_AFMap.c_str()); if (!ok) { Logger::getLogger()->error("setAFMap - Invalid Asset Framework Map, error :%s:", GetParseError_En(JSon.GetParseError())); return false; } if (JSon.HasMember("names")) { HandleAFMapNames(JSon); } if (JSon.HasMember("metadata")) { HandleAFMapMetedata(JSon); } return success; } /** * Set the first level of hierarchy in Asset Framework in which the assets will be created, PI Web API only. */ void OMF::setPrefixAFAsset(const string &prefixAFAsset) { m_prefixAFAsset = prefixAFAsset; } /** * Generate an unique prefix for AF objects */ string OMF::generateUniquePrefixId(const string &path) { string prefix; std::size_t hierarchyHash = std::hash<std::string>{}(path); prefix = std::to_string(hierarchyHash); return prefix; } /** * Set the list of errors considered not blocking in the communication * with the PI Server */ void OMF::setNotBlockingErrors(std::vector<std::string>& notBlockingErrors) { m_notBlockingErrors = notBlockingErrors; } /** * Increment type-id */ void OMF::incrementTypeId() { ++m_typeId; } /** * Clear OMF types cache */ void OMF::clearCreatedTypes() { if (m_OMFDataTypes) { m_OMFDataTypes->clear(); } } /** * Check for invalid/redefinition data type error * * @param message Server reply message for data type creation * @return True for data type error, false otherwise */ bool OMF::isDataTypeError(const char* message) { if (message) { string serverReply(message); for(string &item : m_notBlockingErrors) { if (serverReply.find(item) != std::string::npos) { return true; } } } return false; 
} /** * Send again Data Types of current reading data * with a new type-id * * NOTE: the m_typeId member variable value is incremented. * * @param reading The current reading data * @return True if data types with new-id * have been sent, false otherwise. */ bool OMF::handleTypeErrors(const string& keyComplete, const Reading& reading, OMFHints *hints) { Logger::getLogger()->debug("handleTypeErrors keyComplete :%s:", keyComplete.c_str()); bool ret = true; string assetName = m_assetName; // Reset change type-id indicator m_changeTypeId = false; // Increment per asset type-id in memory cache: // Note: if key is not found the global type-id is incremented OMF::incrementAssetTypeId(keyComplete); // Clear per asset data (but keep the type-id) if key found // or remove all data otherwise auto it = m_OMFDataTypes->find(keyComplete); if (it != m_OMFDataTypes->end()) { // Clear the OMF types cache per asset, keep type-id OMF::clearCreatedTypes(keyComplete); } else { // Remove all cached data, any asset OMF::clearCreatedTypes(); } // Force re-send data types with a new type-id if (!OMF::handleDataTypes(keyComplete, reading, false, hints)) { Logger::getLogger()->error("Failure re-sending JSON dataType messages " "with new type-id=%d for asset %s", OMF::getAssetTypeId(assetName), assetName.c_str()); // Failure ret = false; } return ret; } /** * Create a superset data map for each reading and found datapoints * * The output map is filled with a Reading object containing * all the datapoints found for each asset in the input reading set. 
* The datapoints have a fake value based on the datapoint type * * @param readings Current input readings data * @param dataSuperSet Map to store all datapoints for an assetname */ void OMF::setMapObjectTypes(const vector<Reading*>& readings, std::map<std::string, Reading*>& dataSuperSet) { // Temporary map for [asset][datapoint] = type std::map<string, map<string, string>> readingAllDataPoints; // Fetch ALL Reading pointers in the input vector // and create a map of [assetName][datapoint1 .. datapointN] = type for (vector<Reading *>::const_iterator elem = readings.begin(); elem != readings.end(); ++elem) { // Get asset name string assetName = ApplyPIServerNamingRulesObj((**elem).getAssetName(), nullptr); //string assetName = (**elem).getAssetName(); // Get all datapoints const vector<Datapoint*> data = (**elem).getReadingData(); // Iterate through datapoints for (vector<Datapoint*>::const_iterator it = data.begin(); it != data.end(); ++it) { string omfType; string datapointName = (*it)->getName(); if (!isTypeSupported((*it)->getData())) { omfType = OMF_TYPE_UNSUPPORTED; Logger::getLogger()->debug("%s - The type of the datapoint " + assetName + "/" + datapointName + " is unsupported, it will be ignored", __FUNCTION__); } else { omfType = omfTypes[((*it)->getData()).getType()]; // if an OMF hint is applied the type may change { Reading *reading = *elem; // Fetch and parse any OMFHint for this reading Datapoint *hintsdp = reading->getDatapoint("OMFHint"); if (hintsdp && (omfType == OMF_TYPE_FLOAT || omfType == OMF_TYPE_INTEGER)) { OMFHints *hints = new OMFHints(hintsdp->getData().toString()); const vector<OMFHint *> omfHints = hints->getHints(); for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++) { if (typeid(**it) == typeid(OMFIntegerHint)) { omfType = OMF_TYPE_INTEGER; break; } } delete hints; } } auto itr = readingAllDataPoints.find(assetName); // Asset not found in the map if (itr == readingAllDataPoints.end()) { // Set type of current datapoint for 
assetName readingAllDataPoints[assetName][datapointName] = omfType; } else { // Asset found auto dpItr = (*itr).second.find(datapointName); // Datapoint not found if (dpItr == (*itr).second.end()) { // Add datapointName/type to map with key assetName (*itr).second.emplace(datapointName, omfType); } else { if ((*dpItr).second.compare(omfType) != 0) { // Datapoint already set has changed type Logger::getLogger()->info("Datapoint '" + datapointName + \ "' in asset '" + assetName + \ "' has changed type from '" + (*dpItr).second + \ " to " + omfType); } // Update datapointName/type to map with key assetName // 1- remove element (*itr).second.erase(dpItr); // 2- Add new value readingAllDataPoints[assetName][datapointName] = omfType; } } } } } // Loop now only the elements found in the per asset types map for (auto it = readingAllDataPoints.begin(); it != readingAllDataPoints.end(); ++it) { string assetName = (*it).first; vector<Datapoint *> values; // Set fake datapoints values for (auto dp = (*it).second.begin(); dp != (*it).second.end(); ++dp) { if ((*dp).second.compare(OMF_TYPE_FLOAT) == 0) { DatapointValue vDouble(0.1); values.push_back(new Datapoint((*dp).first, vDouble)); } else if ((*dp).second.compare(OMF_TYPE_INTEGER) == 0) { DatapointValue vInt((long)1); values.push_back(new Datapoint((*dp).first, vInt)); } else if ((*dp).second.compare(OMF_TYPE_STRING) == 0) { DatapointValue vString("v_str"); values.push_back(new Datapoint((*dp).first, vString)); } else if ((*dp).second.compare(OMF_TYPE_UNSUPPORTED) == 0) { Logger::getLogger()->debug("%s - The asset '" + assetName + " has a datapoint having an unsupported type, it will be ignored", __FUNCTION__); // Avoids forcing PI Server to handle unsupported datapoint types // std::vector<double> vData = {0}; // DatapointValue vArray(vData); // values.push_back(new Datapoint((*dp).first, vArray)); } } // Add the superset Reading data with fake values dataSuperSet.emplace(assetName, new Reading(assetName, values)); } } /** 
 * Cleanup the mapped object types for input data
 *
 * Deletes every Reading created by setMapObjectTypes() after first
 * removing its datapoints, then empties the map.
 *
 * @param dataSuperSet	The mapped object to cleanup
 */
void OMF::unsetMapObjectTypes(std::map<std::string, Reading*>& dataSuperSet) const
{
	// Remove all assets supersetDataPoints
	for (auto m = dataSuperSet.begin(); m != dataSuperSet.end(); ++m)
	{
		(*m).second->removeAllDatapoints();
		delete (*m).second;
	}
	dataSuperSet.clear();
}

/**
 * Extract assetName from error message
 *
 * Currently handled cases
 * (1) $datasource + "." + $id + "_" + $assetName + "_typename_measurement" + ...
 * (2) $id + "measurement_" + $assetName
 *
 * @param message	OMF error message (JSON); the asset name is parsed
 *			from its "source" string member
 * @return		The found assetName if found, or empty string
 */
string OMF::getAssetNameFromError(const char* message)
{
	string assetName;
	Document error;

	error.Parse(message);

	if (!error.HasParseError() &&
	    error.HasMember("source") &&
	    error["source"].IsString())
	{
		string tmp = error["source"].GetString();

		// (1) $datasource + "." + $id + "_" + $assetName + "_typename_measurement" + ...
		size_t found = tmp.find("_typename_measurement");
		if (found != std::string::npos)
		{
			// Drop the "_typename_measurement" suffix, then skip the
			// datasource (up to the configured delimiter) and the id
			// (up to the first '_')
			tmp = tmp.substr(0, found);
			found = tmp.find_first_of(m_delimiter[0]);
			if (found != std::string::npos &&
			    found < tmp.length())
			{
				tmp = tmp.substr(found + 1);
				found = tmp.find_first_of('_');
				if (found != std::string::npos &&
				    found < tmp.length())
				{
					// bug fixed
					//assetName = assetName.substr(found + 1 );
					assetName = tmp.substr(found + 1 );
				}
			}
		}
		else
		{
			// (2) $id + "measurement_" + $assetName
			found = tmp.find_first_of('_');
			if (found != std::string::npos &&
			    found < tmp.length())
			{
				assetName = tmp.substr(found + 1);
			}
		}
	}
	return assetName;
}

/**
 * Return the asset type-id
 *
 * @param assetName	The asset name
 * @return		The found type-id
 *			or the generic value
 */
long OMF::getAssetTypeId(const string& assetName)
{
	long typeId;
	string keyComplete;
	string AFHierarchyPrefix;
	string AFHierarchyLevel;

	// Connector relay / ODS / EDS
	if (m_PIServerEndpoint == ENDPOINT_CR  ||
	    m_PIServerEndpoint == ENDPOINT_OCS ||
m_PIServerEndpoint == ENDPOINT_ADH || m_PIServerEndpoint == ENDPOINT_EDS ) { keyComplete = assetName; } else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { if (getNamingScheme(assetName) == NAMINGSCHEME_CONCISE) { keyComplete = assetName; } else { retrieveAFHierarchyPrefixAssetName(assetName, AFHierarchyPrefix, AFHierarchyLevel); keyComplete = AFHierarchyPrefix + "_" + assetName; } } if (!m_OMFDataTypes) { // Use current value of m_typeId typeId = m_typeId; } else { auto it = m_OMFDataTypes->find(keyComplete); if (it != m_OMFDataTypes->end()) { // Set the type-id of found element typeId = ((*it).second).typeId; } else { // Use current value of m_typeId typeId = m_typeId; } } return typeId; } /** * Retrieve the naming scheme for the given asset in relation to the end point selected the default naming scheme selected * and the naming scheme of the asset itself * * @param assetName Asset for which the naming schema should be retrieved * @return Naming schema of the given asset */ long OMF::getNamingScheme(const string& assetName) { long namingScheme; string keyComplete; string AFHierarchyPrefix; string AFHierarchyLevel; // Connector relay / ADH / ODS / EDS if (m_PIServerEndpoint == ENDPOINT_CR || m_PIServerEndpoint == ENDPOINT_OCS || m_PIServerEndpoint == ENDPOINT_ADH || m_PIServerEndpoint == ENDPOINT_EDS ) { keyComplete = assetName; } else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { retrieveAFHierarchyPrefixAssetName(assetName, AFHierarchyPrefix, AFHierarchyLevel); keyComplete = AFHierarchyPrefix + "_" + assetName; } if (!m_OMFDataTypes) { // Use current value of m_typeId namingScheme = m_NamingScheme; } else { auto it = m_OMFDataTypes->find(keyComplete); if (it != m_OMFDataTypes->end()) { // Set the type-id of found element namingScheme = ((*it).second).namingScheme; } else { keyComplete = assetName; auto it = m_OMFDataTypes->find(keyComplete); if (it != m_OMFDataTypes->end()) { // Set the type-id of found element namingScheme = ((*it).second).namingScheme; } 
else { // Use current value of m_typeId namingScheme = m_NamingScheme; } } } return namingScheme; } /** * Retrieve the hash for the given asset in relation to the end point selected * * @param assetName Asset for which the hash should be retrieved * @return Hash of the given asset */ string OMF::getHashStored(const string& assetName) { string hash; string keyComplete; string AFHierarchyPrefix; string AFHierarchyLevel; // Connector relay / ADH / ODS / EDS if (m_PIServerEndpoint == ENDPOINT_CR || m_PIServerEndpoint == ENDPOINT_OCS || m_PIServerEndpoint == ENDPOINT_ADH || m_PIServerEndpoint == ENDPOINT_EDS ) { keyComplete = assetName; } else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { if (getNamingScheme(assetName) == NAMINGSCHEME_CONCISE) { keyComplete = assetName; } else { keyComplete = AFHierarchyPrefix + "_" + assetName; } } if (!m_OMFDataTypes) { hash = ""; } else { auto it = m_OMFDataTypes->find(keyComplete); if (it != m_OMFDataTypes->end()) { // Set the type-id of found element hash = ((*it).second).afhHash; } else { hash = ""; } } return hash; } /** * Retrieve the current AF hierarchy for the given asset * * @param assetName Asset for which the path should be retrieved * @return Path of the given asset */ string OMF::getPathStored(const string& assetName) { string afHierarchy; string keyComplete; string AFHierarchyPrefix; string AFHierarchyLevel; // Connector relay / ADH / OCS / EDS if (m_PIServerEndpoint == ENDPOINT_CR || m_PIServerEndpoint == ENDPOINT_ADH || m_PIServerEndpoint == ENDPOINT_OCS || m_PIServerEndpoint == ENDPOINT_EDS ) { keyComplete = assetName; } else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { if (getNamingScheme(assetName) == NAMINGSCHEME_CONCISE) { keyComplete = assetName; } else { keyComplete = AFHierarchyPrefix + "_" + assetName; } } if (!m_OMFDataTypes) { afHierarchy = ""; } else { auto it = m_OMFDataTypes->find(keyComplete); if (it != m_OMFDataTypes->end()) { // Set the type-id of found element afHierarchy = 
((*it).second).afHierarchy;
		}
		else
		{
			afHierarchy = "";
		}
	}
	return afHierarchy;
}

/**
 * Retrieve the AF hierarchy in which given asset was created
 *
 * @param assetName	Asset for which the path should be retrieved
 * @return		Path of the given asset, "" if not cached
 */
string OMF::getPathOrigStored(const string& assetName)
{
	string afHierarchy;
	string keyComplete;
	string AFHierarchyPrefix;
	string AFHierarchyLevel;

	// Connector relay / ADH / OCS / EDS use the plain asset name as cache key
	if (m_PIServerEndpoint == ENDPOINT_CR  ||
	    m_PIServerEndpoint == ENDPOINT_ADH ||
	    m_PIServerEndpoint == ENDPOINT_OCS ||
	    m_PIServerEndpoint == ENDPOINT_EDS )
	{
		keyComplete = assetName;
	}
	else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API)
	{
		if (getNamingScheme(assetName) == NAMINGSCHEME_CONCISE)
		{
			keyComplete = assetName;
		}
		else
		{
			// NOTE(review): AFHierarchyPrefix is never populated here (no
			// retrieveAFHierarchyPrefixAssetName() call, unlike
			// getAssetTypeId()), so the key becomes "_" + assetName and
			// the lookup below is unlikely to match — verify.
			keyComplete = AFHierarchyPrefix + "_" + assetName;
		}
	}

	if (!m_OMFDataTypes)
	{
		afHierarchy = "";
	}
	else
	{
		auto it = m_OMFDataTypes->find(keyComplete);
		if (it != m_OMFDataTypes->end())
		{
			// AF hierarchy in which the asset was originally created
			afHierarchy = ((*it).second).afHierarchyOrig;
		}
		else
		{
			afHierarchy = "";
		}
	}
	return afHierarchy;
}

/**
 * Stores the current AF hierarchy for the given asset
 *
 * @param assetName	Asset for which the path should be stored
 * @param afHierarchy	Current AF hierarchy of the asset
 *
 * @return		True if the cache entry was found and updated
 */
bool OMF::setPathStored(const string& assetName, string &afHierarchy)
{
	bool operationExecuted;
	string keyComplete;
	string AFHierarchyPrefix;
	string AFHierarchyLevel;

	// Connector relay / ADH / OCS / EDS use the plain asset name as cache key
	if (m_PIServerEndpoint == ENDPOINT_CR  ||
	    m_PIServerEndpoint == ENDPOINT_ADH ||
	    m_PIServerEndpoint == ENDPOINT_OCS ||
	    m_PIServerEndpoint == ENDPOINT_EDS )
	{
		keyComplete = assetName;
	}
	else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API)
	{
		if (getNamingScheme(assetName) == NAMINGSCHEME_CONCISE)
		{
			keyComplete = assetName;
		}
		else
		{
			// NOTE(review): AFHierarchyPrefix never populated here either —
			// same suspected missing retrieveAFHierarchyPrefixAssetName()
			// call as in getPathOrigStored() above; verify.
			keyComplete = AFHierarchyPrefix + "_" + assetName;
		}
	}

	operationExecuted = false;
	if (!m_OMFDataTypes)
	{
		operationExecuted = false;
	}
	else
	{
		auto it = m_OMFDataTypes->find(keyComplete);
		if (it != m_OMFDataTypes->end())
		{
			// Update the stored AF hierarchy of the cached entry
			((*it).second).afHierarchy = afHierarchy;
			operationExecuted = true;
		}
		else
		{
			operationExecuted = false;
		}
	}
	return operationExecuted;
}

/**
 * Increment the type-id for the given asset name
 *
 * If cached data pointer is NULL or asset name is not set
 * the global m_typeId is incremented.
 *
 * @param keyComplete	The asset name
 *			which type-id sequence
 *			has to be incremented.
 */
void OMF::incrementAssetTypeId(const std::string& keyComplete)
{
	long typeId;	// NOTE(review): unused local, kept as-is

	if (!m_OMFDataTypes)
	{
		// Increment current value of m_typeId
		OMF::incrementTypeId();
	}
	else
	{
		auto it = m_OMFDataTypes->find(keyComplete);
		if (it != m_OMFDataTypes->end())
		{
			// Increment value of found type-id
			++((*it).second).typeId;
		}
		else
		{
			// Increment current value of m_typeId
			OMF::incrementTypeId();
		}
	}
}

/**
 * Increment the type-id for the given asset name, but only when the
 * asset is present in the data types cache: no global fallback.
 *
 * @param keyComplete	The asset name
 *			which type-id sequence
 *			has to be incremented.
 */
void OMF::incrementAssetTypeIdOnly(const std::string& keyComplete)
{
	long typeId;	// NOTE(review): unused local, kept as-is

	if (m_OMFDataTypes)
	{
		auto it = m_OMFDataTypes->find(keyComplete);
		if (it != m_OMFDataTypes->end())
		{
			// Increment value of found type-id
			++((*it).second).typeId;
		}
	}
}

/**
 * Generate a 64 bit number containing a set of counts,
 * number of datapoint in an asset and the number of datapoint of each type we support.
 *
 */
unsigned long OMF::calcTypeShort(const Reading& row)
{
	t_typeCount typeCount;	// NOTE(review): not explicitly zero-initialised;
				// confirm t_typeCount default-initialises its counters
	int type;

	const vector<Datapoint*> data = row.getReadingData();

	// NOTE(review): the loop condition stops at the FIRST unsupported
	// datapoint, so supported datapoints after it are not counted and the
	// inner isTypeSupported() 'continue' below can never run — verify intent.
	for (vector<Datapoint*>::const_iterator it = data.begin();
	     (it != data.end() &&
	      isTypeSupported((*it)->getData()));
	     ++it)
	{
		string dpName = (*it)->getName();
		if (!isTypeSupported((*it)->getData()))
		{
			continue;
		}
		if (dpName.compare(OMF_HINT) == 0)
		{
			// We never include OMF hints in the data we send to PI
			continue;
		}

		type = ((*it)->getData()).getType();

		// Integer is handled as float in the OMF integration
		if (type == DatapointValue::dataTagType::T_INTEGER)
		{
			typeCount.cnt.tFloat++;
		}
		if (type == DatapointValue::dataTagType::T_FLOAT)
		{
			typeCount.cnt.tFloat++;
		}
		if (type == DatapointValue::dataTagType::T_STRING)
		{
			typeCount.cnt.tString++;
		}
		typeCount.cnt.tTotal++;
	}

	return typeCount.valueLong;
}

/**
 * Add the reading asset name key into a map
 * That key is checked by getCreatedTypes in order
 * to send dataTypes only once
 *
 * @param row	The reading data row
 * @param hints	OMF hints for this reading, may be NULL
 * @return	True, false if map pointer is NULL
 */
bool OMF::setCreatedTypes(const Reading& row, OMFHints *hints)
{
	string types;
	string keyComplete;
	string assetName;
	string AFHierarchyPrefix;
	string AFHierarchy;

	if (!m_OMFDataTypes)
	{
		return false;
	}

	assetName = m_assetName;
	retrieveAFHierarchyFullPrefixAssetName(assetName, AFHierarchyPrefix, AFHierarchy);

	// Connector relay / ADH / OCS / EDS use the plain asset name as cache key
	if (m_PIServerEndpoint == ENDPOINT_CR  ||
	    m_PIServerEndpoint == ENDPOINT_ADH ||
	    m_PIServerEndpoint == ENDPOINT_OCS ||
	    m_PIServerEndpoint == ENDPOINT_EDS )
	{
		keyComplete = m_assetName;
	}
	else if (m_PIServerEndpoint == ENDPOINT_PIWEB_API)
	{
		if (getNamingScheme(assetName) == NAMINGSCHEME_CONCISE)
		{
			keyComplete = assetName;
		}
		else
		{
			keyComplete = AFHierarchyPrefix + "_" + assetName;
		}
	}

	// We may need to add the hint to the key if we have a TypeName key
	if (hints)
	{
		const vector<OMFHint *> omfHints = hints->getHints();
		for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++)
		{
			if (typeid(**it) == typeid(OMFTypeNameHint))
			{
				Logger::getLogger()->debug("Using OMF TypeName hint: %s", (*it)->getHint().c_str());
				keyComplete.append("_" + (*it)->getHint());
				break;
			}
		}
	}

	long typeId = OMF::getAssetTypeId(keyComplete);	// NOTE(review): computed but never used below
	const vector<Datapoint*> data = row.getReadingData();

	// Build the JSON object describing the OMF type of each datapoint
	types.append("{");
	bool first = true;

	// NOTE(review): as in calcTypeShort(), the loop condition terminates at
	// the first unsupported datapoint, making the OMF_TYPE_UNSUPPORTED branch
	// below unreachable. Removing the condition alone would also activate a
	// dangling ", " after the 'first' handling — verify intent before changing.
	for (vector<Datapoint*>::const_iterator it = data.begin();
	     (it != data.end() &&
	      isTypeSupported((*it)->getData()));
	     ++it)
	{
		string dpName = (*it)->getName();
		if (dpName.compare(OMF_HINT) == 0)
		{
			// We never include OMF hints in the data we send to PI
			continue;
		}
		if (!first)
		{
			types.append(", ");
		}
		else
		{
			first = false;
		}

		string omfType;
		if (!isTypeSupported((*it)->getData()))
		{
			omfType = OMF_TYPE_UNSUPPORTED;
			continue;
		}
		else
		{
			omfType = omfTypes[((*it)->getData()).getType()];
		}
		string format = OMF::getFormatType(omfType);

		// A Number or Integer OMF hint may override the numeric format
		if (hints && (omfType == OMF_TYPE_FLOAT || omfType == OMF_TYPE_INTEGER))
		{
			const vector<OMFHint *> omfHints = hints->getHints(dpName);
			for (auto it = omfHints.cbegin(); it != omfHints.cend(); it++)
			{
				if (typeid(**it) == typeid(OMFNumberHint))
				{
					format = (*it)->getHint();
					break;
				}
				if (typeid(**it) == typeid(OMFIntegerHint))
				{
					omfType = OMF_TYPE_INTEGER;
					format = (*it)->getHint();
					break;
				}
			}
		}

		// Add datapoint Name
		types.append("\"" + dpName + "\"");
		types.append(": {\"type\": \"");
		// Add datapoint Type
		types.append(omfType);
		// Applies a format if it is defined
		if (!format.empty())
		{
			types.append("\", \"format\": \"");
			types.append(format);
		}
		types.append("\"}");
	}
	types.append("}");

	if (m_OMFDataTypes->find(keyComplete) == m_OMFDataTypes->end())
	{
		// New entry
		OMFDataTypes newData;
		// Start from default as we don't have anything in the cache
		newData.typeId = m_typeId;
		newData.types = types;
		(*m_OMFDataTypes)[keyComplete] = newData;
	}
	else
	{
		// Just update dataTypes and keep the typeId
		(*m_OMFDataTypes)[keyComplete].types = types;
	}

	(*m_OMFDataTypes)[keyComplete].typesShort = calcTypeShort(row);
	(*m_OMFDataTypes)[keyComplete].hintChkSum = hints ? hints->getChecksum() : 0;
	(*m_OMFDataTypes)[keyComplete].namingScheme = m_NamingScheme;
	(*m_OMFDataTypes)[keyComplete].afhHash = AFHierarchyPrefix;
	(*m_OMFDataTypes)[keyComplete].afHierarchy = AFHierarchy;
	(*m_OMFDataTypes)[keyComplete].afHierarchyOrig = AFHierarchy;

	Logger::getLogger()->debug("%s - keyComplete :%s: m_NamingScheme :%ld: AFHierarchyPrefix :%s: AFHierarchy :%s: " , __FUNCTION__
		, keyComplete.c_str()
		, m_NamingScheme
		, AFHierarchyPrefix.c_str()
		, AFHierarchy.c_str() );

	return true;
}

/**
 * Set a new value for global type-id
 *
 * new value is the maximum value of
 * type-id among all asset datatypes
 * or
 * the current value of m_typeId
 */
void OMF::setTypeId()
{
	long maxId = m_typeId;
	for (auto it = m_OMFDataTypes->begin(); it != m_OMFDataTypes->end(); ++it)
	{
		if ((*it).second.typeId > maxId)
		{
			maxId = (*it).second.typeId;
		}
	}
	m_typeId = maxId;
}

/**
 * Clear OMF types cache for given asset name
 * but keep the type-id
 *
 * @param keyComplete	The complete asset key to clear
 */
void OMF::clearCreatedTypes(const string& keyComplete)
{
	if (m_OMFDataTypes)
	{
		auto it = m_OMFDataTypes->find(keyComplete);
		if (it != m_OMFDataTypes->end())
		{
			// Just clear data types
			(*it).second.types = "";
		}
	}
}

/**
 * Check the key (assetName) is set and not empty
 * in the per asset data types cache.
 *
 * @param keyComplete	The data type key (assetName) from the Reading row
 * @return		True if the key exists and data value is not empty:
 *			this means the dataTypes were already sent
 *			Found key with empty value means the data types
 *			must be sent again with the new type-id.
 *			Return false if the key is not found or found but empty.
 */
bool OMF::getCreatedTypes(const string& keyComplete, const Reading& row, OMFHints *hints)
{
	Logger::getLogger()->debug("OMF::getCreatedTypes: Key: %s Asset: %s (%s)",
		keyComplete.c_str(),
		row.getAssetName().c_str(),
		DataPointNamesAsString(row).c_str());

	unsigned long typesDefinition;	// NOTE(review): unused local, kept as-is
	bool ret = false;
	bool found = false;		// NOTE(review): unused local, kept as-is
	t_typeCount typeStored;
	t_typeCount typeNew;

	if (!m_OMFDataTypes)
	{
		// No cache at all: types were never sent
		ret = false;
	}
	else
	{
		auto it = m_OMFDataTypes->find(keyComplete);
		if (it != m_OMFDataTypes->end())
		{
			OMFDataTypes& type = it->second;
			ret = ! type.types.empty();
			if (ret)
			{
				// Considers empty also the case "{}"
				if (type.types.compare("{}") == 0)
				{
					ret = false;
				}
				else
				{
					// The Connector Relay recreates the type only when an error is received from the PI-Server
					// not in advance
					if (m_PIServerEndpoint != ENDPOINT_CR)
					{
						if (hints && type.hintChkSum != hints->getChecksum())
						{
							// Hints changed since the types were sent: resend
							ret = false;
						}
						else
						{
							// Check if the defined type has changed respect the superset type
							Reading* datatypeStructure = NULL;
							auto itSuper = m_SuperSetDataPoints.find(m_assetName);
							if (itSuper != m_SuperSetDataPoints.end())
							{
								datatypeStructure = (*itSuper).second;

								// Check if the types are changed
								typeStored.valueLong = type.typesShort;
								typeNew.valueLong = calcTypeShort(*datatypeStructure);

								if (typeNew.cnt.tTotal > typeStored.cnt.tTotal ||
								    typeNew.cnt.tFloat > typeStored.cnt.tFloat ||
								    typeNew.cnt.tString > typeStored.cnt.tString )
								{
									ret = false;
								}
							}
						}
					}
				}
			}
			Logger::getLogger()->debug("OMF::getCreatedTypes: Id: %ld Tshort: %lu Types: %s",
				type.typeId,
				type.typesShort,
				type.types.c_str());
		}
	}
	return ret;
}

/**
 * Check whether input Datapoint type is supported by OMF class
 *
 * @param dataPoint	Input data
 * @return		True if supported, false otherwise
 */
static bool isTypeSupported(DatapointValue& dataPoint)
{
	switch (dataPoint.getType())
	{
		case DatapointValue::DatapointTag::T_FLOAT:
		case DatapointValue::DatapointTag::T_INTEGER:
		case DatapointValue::DatapointTag::T_STRING:
			return true;
		default:
			return false;
	}
}

/**
* Find the best available exception message. * This will be either the Message from the OMF REST JSON response, * or an std::exception message. * * @param exception std::exception object * @param error OMFError object to be populated * @return Best available exception message */ std::string OMF::getExceptionMessage(const std::exception &e, OMFError *error) { std::string exceptionMessage = std::string(e.what()); std::string httpResponse = m_sender.getHTTPResponse(); // Check if either the httpResponse or the std::exception message contain an OMF JSON response if (httpResponse.empty()) { error->setFromHttpResponse(exceptionMessage); } else { error->setFromHttpResponse(httpResponse); } // If OMFError indicates it has messages, an OMF response JSON document must have been available. // Return the first message. If OMFError has no messages, return the std::exception message instead. if (error->hasMessages()) { return error->getMessage(0); } else { return exceptionMessage; } } /** * Process an std::exception generated by an OMF REST call. 
 *
 * @param e		std::exception object raised by the REST call
 * @param mainMessage	Main message text for the logged message
 */
void OMF::handleRESTException(const std::exception &e, const char *mainMessage)
{
	OMFError error;
	std::string errorMsg = getExceptionMessage(e, &error);

	if (error.hasMessages())
	{
		// An OMF JSON error document was parsed: log it and check its HTTP code
		error.Log(mainMessage);
		CheckHttpCode(error.getHttpCode(), errorMsg);
	}
	else
	{
		Logger::getLogger()->error("%s, %s - %s %s",
			mainMessage,
			errorMessageHandler(errorMsg).c_str(),
			m_sender.getHostPort().c_str(),
			m_path.c_str());
		CheckHttpCode(HTTPCodeFromErrorMessage(errorMsg), errorMsg);
	}

	// Check for any error messages that indicate a loss of connection;
	// noConnectionErrorMessages appears to be terminated by an empty string
	int i = 0;
	while (strlen(noConnectionErrorMessages[i]))
	{
		if (0 == strncmp(e.what(), noConnectionErrorMessages[i], strlen(noConnectionErrorMessages[i])))
		{
			m_connected = false;
			Logger::getLogger()->warn("Connection to the destination data archive has been lost");
			break;
		}
		i++;
	}
}

/**
 * Check the HTTP response code and the error message for conditions
 * indicating loss of connection or instability in the PI Server
 *
 * @param httpCode	HTTP response code from REST call
 * @param errorMessage	Error message from REST call
 */
void OMF::CheckHttpCode(const int httpCode, const std::string &errorMessage)
{
	switch (httpCode)
	{
	case 404:	// Not Found
		if (errorMessage.compare(PIWEBAPI_CONTAINER_NOT_FOUND) == 0)
		{
			Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, httpCode);
			m_PIstable = false;
		}
		break;
	case 413:	// Request Entity Too Large
		m_numBlocks++;
		Logger::getLogger()->warn("Next POST of Readings will take place in %lu blocks", m_numBlocks);
		break;
	case 500:	// Internal Server Error
		if (errorMessage.compare(PIWEBAPI_PIPOINTS_NOT_CREATED) == 0)
		{
			// This can occur for a Container message if the PI license has expired,
			// or if the plugin's PI user account lacks permission to create PI points.
			Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, httpCode);
			m_PIstable = false;
		}
		// TODO: determine exactly what a PI Web API update exception for a Data message means.
		// It can mean PI Web API server running but the software is not, therefore loss of connection.
		// It can also means transient data error: PI Web API is running, PI is stable but the data is in error.
		// In all cases, a HTTP 500 will cause plugin_send to return zero which means a failure
		// else if (errorMessage.compare(PIWEBAPI_UPDATE_EXCEPTION) == 0)
		// {
		// 	Logger::getLogger()->warn("Connection to the destination data archive has been lost");
		// 	Logger::getLogger()->debug("%s: %d", __FUNCTION__, httpCode);
		// 	m_connected = false;
		// }
		break;
	case 503:	// Service Unavailable
		Logger::getLogger()->warn("Connection to the destination data archive has been lost");
		m_connected = false;
		break;
	default:
		break;
	}
}

/**
 * Check a PI Server name and returns the proper name to use following the naming rules
 *
 * Invalid chars: Control characters plus:
 * * ? ; { } [ ] | \ ` ' "
 *
 * @param objName	The object name to verify
 * @param changed	If not null, it is set to true if a change occurred
 * @return		Object name following the PI Server naming rules
 */
std::string OMF::ApplyPIServerNamingRulesInvalidChars(const std::string &objName, bool *changed)
{
	std::string nameFixed;

	if (changed)
		*changed = false;

	nameFixed = objName;

	// Replace each forbidden character with an underscore
	for (size_t i = 0; i < nameFixed.length(); i++)
	{
		if ( nameFixed[i] == '*'
		  || nameFixed[i] == '?'
		  || nameFixed[i] == ';'
		  || nameFixed[i] == '{'
		  || nameFixed[i] == '}'
		  || nameFixed[i] == '['
		  || nameFixed[i] == ']'
		  || nameFixed[i] == '|'
		  || nameFixed[i] == '\\'
		  || nameFixed[i] == '`'
		  || nameFixed[i] == '\''
		  || nameFixed[i] == '\"'
		  || iscntrl(nameFixed[i])
		)
		{
			nameFixed.replace(i, 1, "_");
			if (changed)
				*changed = true;
		}
	}

	return (nameFixed);
}

/**
 * Check a PI Server object name and returns the proper name to use following the naming rules:
 *
 * - Blank names are not permitted, substituted with '_'
 * - Trailing spaces are removed
 * - Maximum name length is 200 characters.
 * - Valid chars
 * - Names cannot begin with '__', These are reserved for system use, substituted with single '_'
 *
 * Note: Names on PI-Server side are not case sensitive
 *
 * @param objName	The object name to verify
 * @param changed	If not null, it is set to true if a change occur
 * @return		Object name following the PI Server naming rules
 */
std::string OMF::ApplyPIServerNamingRulesObj(const std::string &objName, bool *changed)
{
	std::string nameFixed;

	if (changed)
		*changed = false;

	// Trim whitespace; a blank name becomes "_"
	nameFixed = StringTrim(objName);
	if (nameFixed.empty ())
	{
		Logger::getLogger()->debug("%s - object name empty", __FUNCTION__);
		nameFixed = "_";
		if (changed)
			*changed = true;
	}
	else
	{
		// NOTE(review): the documented limit is 200 chars, but a
		// 201-char name passes this check untruncated — verify bound
		if (nameFixed.length() > 201)
		{
			nameFixed = nameFixed.substr(0, 200);
			if (changed)
				*changed = true;
			Logger::getLogger()->warn("%s - object name too long, truncated to :%s: ", __FUNCTION__, nameFixed.c_str() );
		}
	}

	nameFixed = ApplyPIServerNamingRulesInvalidChars(nameFixed, changed);

	/// Names cannot begin with '__'. These are reserved for system use.
	if ( nameFixed[0] == '_' && nameFixed[1] == '_' )
	{
		nameFixed.erase(0, 1);
		if (changed)
			*changed = true;
	}

	if (objName.compare(nameFixed) != 0)
	{
		Logger::getLogger()->debug("%s - original :%s: trimmed :%s:", __FUNCTION__, objName.c_str(), nameFixed.c_str());
	}

	return (nameFixed);
}

/**
 * Create a comma-separated string of all Datapoint names in a Reading
 *
 * @param reading	Reading
 * @return		Datapoint names in the Reading
 */
std::string DataPointNamesAsString(const Reading& reading)
{
	std::string dataPointNames;

	for (Datapoint *datapoint : reading.getReadingData())
	{
		dataPointNames.append(datapoint->getName());
		dataPointNames.append(",");
	}

	if (dataPointNames.size() > 0)
	{
		dataPointNames.resize(dataPointNames.size() - 1); // remove trailing comma
	}

	return dataPointNames;
}

/**
 * Check a PI Server path name and returns the proper name to use following the naming rules:
 *
 * - Blank names are not permitted, substituted with '_'
 * - Trailing spaces are removed
 * - Maximum name length is 200 characters.
 * - Valid chars
 * - Names cannot begin with '__', These are reserved for system use, substituted with single '_'
 *
 * Names on PI-Server side are not case sensitive
 *
 * @param objName	The object name to verify
 * @param changed	If not null, it is set to true if a change occurred
 * @return		Object name following the PI Server naming rules
 */
std::string OMF::ApplyPIServerNamingRulesPath(const std::string &objName, bool *changed)
{
	std::string nameFixed;

	if (changed)
		*changed = false;

	// Trim whitespace; a blank path becomes "_"
	nameFixed = StringTrim(objName);
	if (nameFixed.empty ())
	{
		Logger::getLogger()->debug("%s - path empty", __FUNCTION__);
		nameFixed = "_";
		if (changed)
			*changed = true;
	}
	else
	{
		// NOTE(review): same > 201 / truncate-to-200 bound as
		// ApplyPIServerNamingRulesObj — verify intended limit
		if (nameFixed.length() > 201)
		{
			nameFixed = nameFixed.substr(0, 200);
			if (changed)
				*changed = true;
			Logger::getLogger()->warn("%s - path too long, truncated to :%s: ", __FUNCTION__, nameFixed.c_str() );
		}
	}

	nameFixed = ApplyPIServerNamingRulesInvalidChars(nameFixed, changed);

	/// Names cannot begin with '__'. These are reserved for system use.
	if ( nameFixed[0] == '_' && nameFixed[1] == '_' )
	{
		nameFixed.erase(0, 1);
		if (changed)
			*changed = true;
	}

	// Apply the same rule to every path component
	if (nameFixed.find("/__") != string::npos)
	{
		StringReplaceAll(nameFixed,"/__","/_");
		if (changed)
			*changed = true;
	}

	if (objName.compare(nameFixed) != 0)
	{
		Logger::getLogger()->debug("%s - original :%s: trimmed :%s:", __FUNCTION__, objName.c_str(), nameFixed.c_str());
	}

	return (nameFixed);
}

/**
 * Send the base types that we use to define all the data point values
 *
 * @return true If the data types were sent correctly. Otherwise false.
 */
bool OMF::sendBaseTypes()
{
	vector<pair<string, string>> resType = OMF::createMessageHeader("Type");

	// Build an HTTPS POST with 'resType' headers
	// and 'typeData' JSON payload
	// Then get HTTPS POST ret code and return 0 to client on error
	try
	{
		int res = m_sender.sendRequest("POST", m_path, resType, baseOMFTypes);
		if ( ! (res >= 200 && res <= 299) )
		{
			Logger::getLogger()->error("Sending base data types message 'Type', HTTP code %d - %s %s",
				res,
				m_sender.getHostPort().c_str(),
				m_path.c_str());
			return false;
		}
		else if (res == 201)
		{
			Logger::getLogger()->info("Created basic data types");
		}
		else
		{
			Logger::getLogger()->info("Confirmed basic data types");
		}
	}
	// Exception raised for HTTP 400 Bad Request
	catch (const BadRequest& e)
	{
		string errorMsg;
		OMFError error(m_sender.getHTTPResponse());
		if (error.hasMessages())
		{
			error.Log("The OMF endpoint reported a Bad Request when sending base types");
			errorMsg = error.getMessage(0);
		}
		else
		{
			errorMsg = errorMessageHandler(e.what());
		}

		if (OMF::isDataTypeError(e.what()))
		{
			// Data type error: force type-id change
			m_changeTypeId = true;
		}
		Logger::getLogger()->warn("Sending dataType message 'Type', not blocking issue: %s %s - %s %s",
			(m_changeTypeId ?
			"Data Type " : "" ),
			errorMsg.c_str(),
			m_sender.getHostPort().c_str(),
			m_path.c_str());
		return false;
	}
	catch (const Unauthorized &e)
	{
		Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
		return false;
	}
	catch (const Conflict& e)
	{
		handleRESTException(e, "The OMF endpoint reported a Conflict when sending base types");
		Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, 409);
		m_PIstable = false;
		return false;
	}
	catch (const std::exception &e)
	{
		handleRESTException(e, "Sending Basic Types error");
		return false;
	}

	Logger::getLogger()->debug("Base types successfully sent");
	return true;
}

/**
 * Create the FledgeAsset OMF Type which will define an AF Template.
 * The AF Template will be used to create AF Elements to represent Containers for Linked Types.
 *
 * @return true If the FledgeAsset Type was sent correctly
 */
bool OMF::sendFledgeAssetType()
{
	OMFBuffer writer;

	// Build the OMF "Type" JSON payload for the static FledgeAsset type
	writer.append('[');
	writer.append('{');
	writer.append("\"id\":\"FledgeAsset\",");
	writer.append("\"type\":\"object\",");
	writer.append("\"classification\":\"static\",");
	writer.append("\"properties\":{");
	writer.append("\"AssetId\":{");
	writer.append("\"type\":\"string\",");
	writer.append("\"isindex\":true");
	writer.append("},");
	// One string property for each configured static data item
	for (std::pair<std::string, std::string> &sData : *m_staticData)
	{
		writer.append('\"');
		writer.append(sData.first);
		writer.append("\":{");
		writer.append("\"type\":\"string\"");
		writer.append("},");
	}
	writer.append("\"Name\":{");
	writer.append("\"type\":\"string\",");
	writer.append("\"isname\":true");
	writer.append('}');
	writer.append('}');
	writer.append('}');
	writer.append(']');

	// Ownership of the coalesced payload passes to us; freed below
	const char *payload = writer.coalesce();
	Logger::getLogger()->debug("%s: %s", __FUNCTION__, payload);

	bool retCode = false;
	try
	{
		vector<pair<string, string>> resType = OMF::createMessageHeader("Type");
		int res = m_sender.sendRequest("POST", m_path, resType, payload);
		Logger::getLogger()->info((res == 201) ?
			"Created FledgeAsset Type" : "Confirmed FledgeAsset Type");
		retCode = true;
	}
	catch (const Unauthorized &e)
	{
		Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
	}
	catch (const Conflict& e)
	{
		// The Type already exists: treated as success
		Logger::getLogger()->warn("FledgeAsset Type exists with a different definition");
		retCode = true;
	}
	catch (const std::exception &e)
	{
		handleRESTException(e, "Sending FledgeAsset Type error");
	}
	delete [] payload;

	return retCode;
}

/**
 * Send a message to link the asset into the right place in the AF structure
 *
 * @param reading	The reading being sent
 * @param hints		OMF Hints for this reading
 * @return		true if the message was sent correctly, otherwise false.
 */
bool OMF::sendAFLinks(Reading &reading, OMFHints *hints)
{
	bool success = true;

	std::string afLinks = createAFLinks(reading, hints);
	if (afLinks.empty())
	{
		// Nothing to link for this reading
		return success;
	}

	afLinks = "[" + afLinks + "]";
	std::vector<std::pair<std::string, std::string>> links;
	parseLinkData(afLinks, links);

	try
	{
		vector<pair<string, string>> messageHeader = OMF::createMessageHeader("Data", m_dataActionCode);
		int res = m_sender.sendRequest("POST", m_path, messageHeader, afLinks);
		if (res >= 200 && res <= 299)
		{
			Logger::getLogger()->debug("AF Link message sent successfully: %s", afLinks.c_str());
			success = true;
			LogLinks(res, (res == 201) ?
				"Created Link" : "Confirmed Link", links);
		}
		else
		{
			LogLinks(res, "creating Link", links);
			Logger::getLogger()->error("Sending AF Link Data message, HTTP code %d - %s %s",
				res,
				m_sender.getHostPort().c_str(),
				m_path.c_str());
			success = false;
		}
	}
	catch (const BadRequest &e)
	{
		LogLinks(400, "creating Link", links);
		handleRESTException(e, "The OMF endpoint reported a Bad Request when sending AF Link");
		success = false;
	}
	catch (const Unauthorized &e)
	{
		LogLinks(401, "creating Link", links);
		Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
		success = false;
	}
	catch (const Conflict &e)
	{
		LogLinks(409, "creating Link", links);
		string msg = "Conflict sending AF Link Data message for the asset " + reading.getAssetName();
		handleRESTException(e, msg.c_str());
		Logger::getLogger()->warn(MESSAGE_PI_UNSTABLE, 409);
		m_PIstable = false;
		success = false;
	}
	catch (const std::exception &e)
	{
		LogLinks(0, "creating Link", links);
		handleRESTException(e, "AF Link send message exception");
		success = false;
	}

	return success;
}

/**
 * Create the messages to link the asset holding the container to its parent asset
 *
 * @param reading	The reading being sent
 * @param hints		OMF Hints for this reading
 * @return		OMF JSON snippet to create the AF Link
 */
string OMF::createAFLinks(Reading& reading, OMFHints *hints)
{
	string AFDataMessage;

	if (m_sendFullStructure)
	{
		string assetName = m_assetName;
		string AFHierarchyLevel;
		string prefix;
		string objectPrefix;

		auto rule = m_AssetNamePrefix.find(assetName);
		if (rule != m_AssetNamePrefix.end())
		{
			auto itemArray = rule->second;
			objectPrefix = "";

			for (auto &item : itemArray)
			{
				string AFHierarchy;
				string prefix;	// NOTE(review): shadows the outer 'prefix' above

				AFHierarchy = std::get<0>(item);
				generateAFHierarchyPrefixLevel(AFHierarchy, prefix, AFHierarchyLevel);
				// NOTE(review): the prefix produced by generateAFHierarchyPrefixLevel
				// is immediately overwritten here — confirm this is intended
				prefix = std::get<1>(item);

				if (objectPrefix.empty())
				{
					objectPrefix = prefix;
				}

				Logger::getLogger()->debug("%s - assetName :%s: AFHierarchy :%s: prefix :%s: objectPrefix :%s: AFHierarchyLevel :%s: ", __FUNCTION__
					,assetName.c_str()
					, AFHierarchy.c_str()
					, prefix.c_str()
					, objectPrefix.c_str()
					, AFHierarchyLevel.c_str() );

				// Create data for Static Data message
				AFDataMessage = OMF::createLinkData(reading, AFHierarchyLevel, prefix, objectPrefix, hints, false);
			}
		}
		else
		{
			Logger::getLogger()->error("AF hierarchy is not defined for the asset Name |%s|", assetName.c_str());
		}
	}
	return AFDataMessage;
}

/**
 * Report an error related to an asset if the asset has not already been reported
 *
 * @param asset	The asset name
 * @param level	The level to log the message at
 * @param msg	The message to log
 */
void OMF::reportAsset(const string& asset, const string& level, const string& msg)
{
	// Each asset is reported at most once per plugin lifetime
	if (std::find(m_reportedAssets.begin(), m_reportedAssets.end(), asset) == m_reportedAssets.end())
	{
		m_reportedAssets.push_back(asset);
		if (level.compare("error") == 0)
			Logger::getLogger()->error(msg);
		else if (level.compare("warn") == 0)
			Logger::getLogger()->warn(msg);
		else if (level.compare("fatal") == 0)
			Logger::getLogger()->fatal(msg);
		else if (level.compare("info") == 0)
			Logger::getLogger()->info(msg);
		else
			Logger::getLogger()->debug(msg);
	}
}



================================================
FILE: C/plugins/north/OMF/omfbuffer.cpp
================================================
/*
 * Fledge OMF north plugin buffer class
 *
 * Copyright (c) 2023 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <omfbuffer.h>
#include <string.h>
#include <string_utils.h>

using namespace std;

/**
 * Buffer class designed to hold OMF payloads that can
 * grow as required but have minimal copy semantics.
 */

/**
 * OMFBuffer constructor
 */
OMFBuffer::OMFBuffer()
{
	buffers.push_front(new OMFBuffer::Buffer());
}

/**
 * OMFBuffer destructor
 */
OMFBuffer::~OMFBuffer()
{
	for (list<OMFBuffer::Buffer *>::iterator it = buffers.begin(); it != buffers.end(); ++it)
	{
		delete *it;
	}
}

/**
 * Clear all the buffers from the OMFBuffer and allow it to be reused
 */
void OMFBuffer::clear()
{
	for (list<OMFBuffer::Buffer *>::iterator it = buffers.begin(); it != buffers.end(); ++it)
	{
		delete *it;
	}
	buffers.clear();
	buffers.push_front(new OMFBuffer::Buffer());
}

/**
 * Append a character to a buffer
 *
 * @param data	The character to append to the buffer
 */
void OMFBuffer::append(const char data)
{
	OMFBuffer::Buffer *buffer = buffers.back();

	if (buffer->offset == buffer->length)
	{
		// Current chunk is full: start a new one
		buffer = new OMFBuffer::Buffer();
		buffers.push_back(buffer);
	}
	buffer->data[buffer->offset] = data;
	// Keep the chunk NUL terminated (chunks allocate length + 1 bytes)
	buffer->data[buffer->offset + 1] = 0;
	buffer->offset++;
}

/**
 * Append a character string to a buffer
 *
 * @param data	The string to append to the buffer
 */
void OMFBuffer::append(const char *data)
{
	unsigned int len = strlen(data);
	OMFBuffer::Buffer *buffer = buffers.back();

	if (buffer->offset + len >= buffer->length)
	{
		// Not enough room: start a new chunk, oversized if needed
		if (len > BUFFER_CHUNK)
		{
			buffer = new OMFBuffer::Buffer(len + BUFFER_CHUNK);
		}
		else
		{
			buffer = new OMFBuffer::Buffer();
		}
		buffers.push_back(buffer);
	}
	memcpy(&buffer->data[buffer->offset], data, len);
	buffer->offset += len;
	buffer->data[buffer->offset] = 0;
}

/**
 * Append an integer to a buffer
 *
 * NOTE(review): unlike the string overloads the numeric overloads have no
 * oversized-chunk path; safe provided BUFFER_CHUNK is at least as large as
 * the longest numeric rendering (tmpbuf is 80 bytes) — verify.
 *
 * @param value	The value to append to the buffer
 */
void OMFBuffer::append(const int value)
{
	char tmpbuf[80];
	unsigned int len;
	OMFBuffer::Buffer *buffer = buffers.back();

	len = (unsigned int)snprintf(tmpbuf, 80, "%d", value);
	if (buffer->offset + len >= buffer->length)
	{
		buffer = new OMFBuffer::Buffer();
		buffers.push_back(buffer);
	}
	memcpy(&buffer->data[buffer->offset], tmpbuf, len);
	buffer->offset += len;
	buffer->data[buffer->offset] = 0;
}

/**
 * Append a long to a buffer
 *
 * @param value	The long value to append to the buffer
 */
void OMFBuffer::append(const long value)
{
	char tmpbuf[80];
	unsigned int len;
	OMFBuffer::Buffer *buffer = buffers.back();

	len = (unsigned int)snprintf(tmpbuf, 80, "%ld", value);
	if (buffer->offset + len >= buffer->length)
	{
		buffer = new OMFBuffer::Buffer();
		buffers.push_back(buffer);
	}
	memcpy(&buffer->data[buffer->offset], tmpbuf, len);
	buffer->offset += len;
	buffer->data[buffer->offset] = 0;
}

/**
 * Append an unsigned integer to a buffer
 *
 * @param value	The unsigned value to append to the buffer
 */
void OMFBuffer::append(const unsigned int value)
{
	char tmpbuf[80];
	unsigned int len;
	OMFBuffer::Buffer *buffer = buffers.back();

	len = (unsigned int)snprintf(tmpbuf, 80, "%u", value);
	if (buffer->offset + len >= buffer->length)
	{
		buffer = new OMFBuffer::Buffer();
		buffers.push_back(buffer);
	}
	memcpy(&buffer->data[buffer->offset], tmpbuf, len);
	buffer->offset += len;
	buffer->data[buffer->offset] = 0;
}

/**
 * Append an unsigned long to a buffer
 *
 * @param value	The value to append to the buffer
 */
void OMFBuffer::append(const unsigned long value)
{
	char tmpbuf[80];
	unsigned int len;
	OMFBuffer::Buffer *buffer = buffers.back();

	len = (unsigned int)snprintf(tmpbuf, 80, "%lu", value);
	if (buffer->offset + len >= buffer->length)
	{
		buffer = new OMFBuffer::Buffer();
		buffers.push_back(buffer);
	}
	memcpy(&buffer->data[buffer->offset], tmpbuf, len);
	buffer->offset += len;
	buffer->data[buffer->offset] = 0;
}

/**
 * Append a double to a buffer
 *
 * @param value	The double value to append to the buffer
 */
void OMFBuffer::append(const double value)
{
	char tmpbuf[80];
	unsigned int len;
	OMFBuffer::Buffer *buffer = buffers.back();

	len = (unsigned int)snprintf(tmpbuf, 80, "%f", value);
	if (buffer->offset + len >= buffer->length)
	{
		buffer = new OMFBuffer::Buffer();
		buffers.push_back(buffer);
	}
	memcpy(&buffer->data[buffer->offset], tmpbuf, len);
	buffer->offset += len;
	buffer->data[buffer->offset] = 0;
}

/**
 * Append a string to a buffer
 *
 * @param str	The string to be appended to the buffer
 */
void OMFBuffer::append(const string& str)
{
	const char *cstr = str.c_str();
	unsigned int len = strlen(cstr);
	OMFBuffer::Buffer *buffer = buffers.back();

	if (buffer->offset + len >= buffer->length)
	{
		// Not enough room: start a new chunk, oversized if needed
		if (len > BUFFER_CHUNK)
		{
			buffer = new OMFBuffer::Buffer(len + BUFFER_CHUNK);
		}
		else
		{
			buffer = new OMFBuffer::Buffer();
		}
		buffers.push_back(buffer);
	}
	memcpy(&buffer->data[buffer->offset], cstr, len);
	buffer->offset += len;
	buffer->data[buffer->offset] = 0;
}

/**
 * Quote and append a string to a buffer
 *
 * @param str	The string to quote and append to the buffer
 */
void OMFBuffer::quote(const string& str)
{
	// Escape any embedded quotes before wrapping the string in quotes
	string esc = str;
	StringEscapeQuotes(esc);
	const char *cstr = esc.c_str();
	unsigned int len = strlen(cstr) + 2;	// +2 for the surrounding quotes
	OMFBuffer::Buffer *buffer = buffers.back();

	if (buffer->offset + len >= buffer->length)
	{
		if (len > BUFFER_CHUNK)
		{
			buffer = new OMFBuffer::Buffer(len + BUFFER_CHUNK);
		}
		else
		{
			buffer = new OMFBuffer::Buffer();
		}
		buffers.push_back(buffer);
	}
	buffer->data[buffer->offset] = '"';
	memcpy(&buffer->data[buffer->offset + 1], cstr, len - 2);
	buffer->data[buffer->offset + len - 1] = '"';
	buffer->offset += len;
	buffer->data[buffer->offset] = 0;
}

/**
 * Create a coalesced buffer from the buffer chain
 *
 * The buffer returned has been created using the new[] operator and must be
 * deleted by the caller.
 *
 * NOTE(review): in the single-chunk case the chunk is detached (its data
 * pointer becomes NULL) but remains in the list, so appending again without
 * an intervening clear() would dereference NULL — callers appear expected to
 * call clear() before reuse; verify.
 *
 * @return char* The OMF payload in a single buffer
 */
const char *OMFBuffer::coalesce()
{
	unsigned int length = 0, offset = 0;
	char *buffer = 0;

	if (buffers.size() == 1)
	{
		// Single chunk: hand ownership of its data straight to the caller
		return buffers.back()->detach();
	}
	for (list<OMFBuffer::Buffer *>::iterator it = buffers.begin(); it != buffers.end(); ++it)
	{
		length += (*it)->offset;
	}
	buffer = new char[length+1];
	for (list<OMFBuffer::Buffer *>::iterator it = buffers.begin(); it != buffers.end(); ++it)
	{
		memcpy(&buffer[offset], (*it)->data, (*it)->offset);
		offset += (*it)->offset;
	}
	buffer[offset] = 0;
	return buffer;
}

/**
 * Construct a buffer with a standard size initial buffer.
 */
OMFBuffer::Buffer::Buffer() : offset(0), length(BUFFER_CHUNK), attached(true)
{
	// One extra byte is always allocated to hold the NUL terminator
	data = new char[BUFFER_CHUNK+1];
	data[0] = 0;
}

/**
 * Construct a large buffer, passing the size of buffer required. This is useful
 * if you know your buffer requirements are large and you wish to reduce the amount
 * of allocation required.
 *
 * @param size	The size of the initial buffer to allocate.
 */
OMFBuffer::Buffer::Buffer(unsigned int size) : offset(0), length(size), attached(true)
{
	// One extra byte is always allocated to hold the NUL terminator
	data = new char[size+1];
	data[0] = 0;
}

/**
 * Buffer destructor, the buffer itself is also deleted by this
 * call and any reference to it must no longer be used.
 */
OMFBuffer::Buffer::~Buffer()
{
	// Only free the data if ownership has not been taken via detach()
	if (attached)
	{
		delete[] data;
		data = 0;
	}
}

/**
 * Detach the buffer from the OMFBuffer. The reference to the buffer
 * is removed from the OMFBuffer but the buffer itself is not deleted.
 * This allows the buffer ownership to be taken by external code
 * whilst allowing the OMFBuffer to allocate a new buffer.
 *
 * @return char*	The detached data; the caller now owns the allocation
 */
char *OMFBuffer::Buffer::detach()
{
	char *rval = data;
	// Clear the ownership flag so the destructor will not free the data
	attached = false;
	length = 0;
	data = 0;
	return rval;
}


================================================
FILE: C/plugins/north/OMF/omfhints.cpp
================================================
/*
 * Fledge OSI Soft OMF interface to PI Server.
 *
 * Copyright (c) 2020 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <utility>
#include <iostream>
#include <string>
#include <cstring>
#include <omf.h>
#include <OMFHint.h>
#include <logger.h>
#include <rapidjson/document.h>
#include "rapidjson/error/en.h"
#include "string_utils.h"
#include <string_utils.h>
#include <datapoint.h>

using namespace std;
using namespace rapidjson;

// The AFLocation hint key as it appears, quoted, inside a serialised hint string
#define OMFHINTS_AFLOCATION "\"AFLocation\""

/**
 * Extracts from a complete OMF hint the part on which the checksum should be generated,
 * for example, it will remove the section related to the AFLocation hint to avoid the creation of a new type
 * when the value changes. 
* * @param hint Original/complete OMF hint * * @return OMF hint that should be considered for the calculation of the checksum */ string OMFHints::getHintForChecksum(const string &hint) { size_t pos1, pos2, pos3; string hintFinal; hintFinal = hint; pos1 = hintFinal.find(OMFHINTS_AFLOCATION); if (pos1 != std::string::npos) { pos2 = hintFinal.find(",", pos1); if (pos2 != std::string::npos) { // There is another hint hintFinal.erase(pos1, pos2 - pos1 + 1); } else { pos3 = hintFinal.find(","); if (pos3 != std::string::npos) { hintFinal.erase(pos3, hintFinal.length() - pos3 -1); }else { hintFinal.erase(pos1, hintFinal.length() - pos1 -1); } } } // Handle special cases StringReplace(hintFinal, "{}", ""); if (hintFinal.length() == 3) { StringReplace(hintFinal, "{", ""); StringReplace(hintFinal, "}", ""); } return (hintFinal); } /** * Decodes the OMFhint in JSON format assigning the values to the memory structures: m_chksum, m_hints and m_datapointHints * * @param hint OMF hint in JSON format */ OMFHints::OMFHints(const string& hints) { string hintsTmp, hintsChksum; hintsTmp = hints; StringReplaceAll(hintsTmp,"\\",""); m_chksum = 0; if (hintsTmp[0] == '\"') { // Skip any enclosing "'s m_doc.Parse(hintsTmp.substr(1, hintsTmp.length() - 2).c_str()); hintsChksum = getHintForChecksum(hintsTmp); for (int i = 1; i < hintsChksum.length() - 1; i++) m_chksum += hintsChksum[i]; } else { m_doc.Parse(hintsTmp.c_str()); hintsChksum = getHintForChecksum(hintsTmp); for (int i = 0; i < hintsChksum.length(); i++) m_chksum += hintsChksum[i]; } Logger::getLogger()->debug("%s - hints original :%s: adapted :%s: chksum :%X: " , __FUNCTION__ ,hints.c_str() ,hintsChksum.c_str() , m_chksum); if (m_doc.HasParseError()) { Logger::getLogger()->error("Ignoring OMFHint '%s' parse error in JSON", hintsTmp.c_str()); } else { for (Value::ConstMemberIterator itr = m_doc.MemberBegin(); itr != m_doc.MemberEnd(); ++itr) { const char *name = itr->name.GetString(); if (strcmp(name, "number") == 0) { 
m_hints.push_back(new OMFNumberHint(itr->value.GetString())); } else if (strcmp(name, "integer") == 0) { m_hints.push_back(new OMFIntegerHint(itr->value.GetString())); } else if (strcmp(name, "typeName") == 0) { m_hints.push_back(new OMFTypeNameHint(itr->value.GetString())); } else if (strcmp(name, "tagName") == 0) { m_hints.push_back(new OMFTagNameHint(itr->value.GetString())); } else if (strcmp(name, "tag") == 0) { m_hints.push_back(new OMFTagHint(itr->value.GetString())); } else if (strcmp(name, "AFLocation") == 0) { m_hints.push_back(new OMFAFLocationHint(itr->value.GetString())); } else if (strcmp(name, "LegacyType") == 0) { m_hints.push_back(new OMFLegacyTypeHint(itr->value.GetString())); } else if (strcmp(name, "source") == 0) { m_hints.push_back(new OMFSourceHint(itr->value.GetString())); } else if (strcmp(name, "datapoint") == 0) { const Value &child = itr->value; if (child.IsArray()) { for (Value::ConstValueIterator dpitr2 = child.Begin(); dpitr2 != child.End(); ++dpitr2) { if (dpitr2->HasMember("name")) { const string dpname = (*dpitr2)["name"].GetString(); vector<OMFHint *> hints; for (Value::ConstMemberIterator dpitr = dpitr2->MemberBegin(); dpitr != dpitr2->MemberEnd(); ++dpitr) { const char *name = dpitr->name.GetString(); if (strcmp(name, "number") == 0) { hints.push_back(new OMFNumberHint(dpitr->value.GetString())); } else if (strcmp(name, "integer") == 0) { hints.push_back(new OMFIntegerHint(dpitr->value.GetString())); } else if (strcmp(name, "typeName") == 0) { hints.push_back(new OMFTypeNameHint(dpitr->value.GetString())); } else if (strcmp(name, "tagName") == 0) { hints.push_back(new OMFTagNameDatapointHint(dpitr->value.GetString())); } else if (strcmp(name, "tag") == 0) { hints.push_back(new OMFTagHint(dpitr->value.GetString())); } else if (strcmp(name, "uom") == 0) { hints.push_back(new OMFUOMHint(dpitr->value.GetString())); } else if (strcmp(name, "source") == 0) { hints.push_back(new OMFSourceHint(dpitr->value.GetString())); } else if 
(strcmp(name, "minimum") == 0) { hints.push_back(new OMFMinimumHint(dpitr->value.GetString())); } else if (strcmp(name, "maximum") == 0) { hints.push_back(new OMFMaximumHint(dpitr->value.GetString())); } else if (strcmp(name, "interpolation") == 0) { string interpolation = dpitr->value.GetString(); if (interpolation.compare("continuous") && interpolation.compare("discrete") && interpolation.compare("stepwisecontinuousleading") && interpolation.compare("stepwisecontinuousfollowing")) { Logger::getLogger()->warn("Invalid value for interpolation hint for %s, only continuous, discrete, stepwisecontinuousleading, and stepwisecontinuousfollowing are supported", dpname.c_str()); } else { hints.push_back(new OMFInterpolationHint(interpolation)); } } else if (strcmp(name, "name")) // Ignore the name { Logger::getLogger()->warn("Invalid OMF hint '%s'", name); } } m_datapointHints.insert(std::pair<string,vector<OMFHint *>>(dpname, hints)); } } } else { if (child.HasMember("name")) { const string dpname = child["name"].GetString(); vector<OMFHint *> hints; for (Value::ConstMemberIterator dpitr = child.MemberBegin(); dpitr != child.MemberEnd(); ++dpitr) { const char *name = dpitr->name.GetString(); if (strcmp(name, "number") == 0) { hints.push_back(new OMFNumberHint(dpitr->value.GetString())); } else if (strcmp(name, "integer") == 0) { hints.push_back(new OMFIntegerHint(dpitr->value.GetString())); } else if (strcmp(name, "typeName") == 0) { hints.push_back(new OMFTypeNameHint(dpitr->value.GetString())); } else if (strcmp(name, "tagName") == 0) { hints.push_back(new OMFTagNameDatapointHint(dpitr->value.GetString())); } else if (strcmp(name, "tag") == 0) { hints.push_back(new OMFTagHint(dpitr->value.GetString())); } else if (strcmp(name, "uom") == 0) { hints.push_back(new OMFUOMHint(dpitr->value.GetString())); } else if (strcmp(name, "source") == 0) { hints.push_back(new OMFSourceHint(dpitr->value.GetString())); } else if (strcmp(name, "minimum") == 0) { hints.push_back(new 
OMFMinimumHint(dpitr->value.GetString())); } else if (strcmp(name, "maximum") == 0) { hints.push_back(new OMFMaximumHint(dpitr->value.GetString())); } else if (strcmp(name, "interpolation") == 0) { string interpolation = dpitr->value.GetString(); if (interpolation.compare("continuous") && interpolation.compare("discrete") && interpolation.compare("stepwisecontinuousleading") && interpolation.compare("stepwisecontinuousfollowing")) { Logger::getLogger()->warn("Invalid value for interpolation hint for %s, only continuous, discrete, stepwisecontinuousleading, and stepwisecontinuousfollowing are supported", dpname.c_str()); } else { hints.push_back(new OMFInterpolationHint(interpolation)); } } else if (strcmp(name, "name")) // Ignore the name { Logger::getLogger()->warn("Invalid OMF hint '%s'", name); } } m_datapointHints.insert(std::pair<string,vector<OMFHint *>>(dpname, hints)); } } } else { Logger::getLogger()->error("Unrecognised hint '%s' in OMFHint", name); } } } } /** * Destructor for Hints class */ OMFHints::~OMFHints() { for (OMFHint *hint : m_hints) { delete hint; } for (auto it = m_datapointHints.begin(); it != m_datapointHints.end(); it++) { for (OMFHint *hint : it->second) { delete hint; } } m_datapointHints.erase(m_datapointHints.begin(), m_datapointHints.end()); m_hints.clear(); } /** * Return the hints for a given data point. If it has known then return the hits * for all data points. * * @param datapoint The name of the datapoint to retrieve the hints for */ const vector<OMFHint *>& OMFHints::getHints(const string& datapoint) const { auto it = m_datapointHints.find(datapoint); if (it != m_datapointHints.end()) { return it->second; } return m_hints; } ================================================ FILE: C/plugins/north/OMF/omfinfo.cpp ================================================ /* * Fledge OSIsoft OMF interface to PI Server. 
* * Copyright (c) 2023-2025 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <omfinfo.h> #include <utils.h> using namespace std; using namespace rapidjson; using namespace SimpleWeb; /** * Constructor for the OMFInformation class */ OMFInformation::OMFInformation(ConfigCategory *config) : m_sender(NULL), m_omf(NULL), m_ocs(NULL), m_connected(false) { m_logger = Logger::getLogger(); m_name = config->getName(); m_numBlocks = 1; int endpointPort = 0; // PIServerEndpoint handling string PIServerEndpoint = config->getValue("PIServerEndpoint"); string ADHRegions = config->getValue("ADHRegions"); string ServerHostname = config->getValue("ServerHostname"); if (gethostbyname(ServerHostname.c_str()) == NULL) { Logger::getLogger()->warn("Unable to resolve server hostname '%s'. This should be a valid hostname or IP Address.", ServerHostname.c_str()); } string ServerPort = config->getValue("ServerPort"); string url; string NamingScheme = config->getValue("NamingScheme"); // Translate the PIServerEndpoint configuration if(PIServerEndpoint.compare("PI Web API") == 0) { Logger::getLogger()->debug("PI-Server end point manually selected - PI Web API "); m_PIServerEndpoint = ENDPOINT_PIWEB_API; url = ENDPOINT_URL_PI_WEB_API; endpointPort = ENDPOINT_PORT_PIWEB_API; } else if(PIServerEndpoint.compare("Connector Relay") == 0) { Logger::getLogger()->debug("PI-Server end point manually selected - Connector Relay "); m_PIServerEndpoint = ENDPOINT_CR; url = ENDPOINT_URL_CR; endpointPort = ENDPOINT_PORT_CR; } else if(PIServerEndpoint.compare("AVEVA Data Hub") == 0) { Logger::getLogger()->debug("End point manually selected - AVEVA Data Hub"); m_PIServerEndpoint = ENDPOINT_ADH; url = ENDPOINT_URL_ADH; m_authUrl = AUTHORIZATION_URL_ADH; std::string region = "uswe"; if(ADHRegions.compare("EU-West") == 0) region = "euno"; else if(ADHRegions.compare("Australia") == 0) region = "auea"; StringReplace(url, "REGION_PLACEHOLDER", region); 
StringReplace(m_authUrl, "REGION_PLACEHOLDER", region); endpointPort = ENDPOINT_PORT_ADH; } else if(PIServerEndpoint.compare("OSIsoft Cloud Services") == 0) { Logger::getLogger()->debug("End point manually selected - OSIsoft Cloud Services"); m_PIServerEndpoint = ENDPOINT_OCS; url = ENDPOINT_URL_OCS; m_authUrl = AUTHORIZATION_URL_OCS; std::string region = "dat-b"; if(ADHRegions.compare("EU-West") == 0) region = "dat-d"; else if(ADHRegions.compare("Australia") == 0) Logger::getLogger()->error("OSIsoft Cloud Services are not hosted in Australia"); StringReplace(url, "REGION_PLACEHOLDER", region); StringReplace(m_authUrl, "REGION_PLACEHOLDER", region); endpointPort = ENDPOINT_PORT_OCS; } else if(PIServerEndpoint.compare("Edge Data Store") == 0) { Logger::getLogger()->debug("End point manually selected - Edge Data Store"); m_PIServerEndpoint = ENDPOINT_EDS; url = ENDPOINT_URL_EDS; endpointPort = ENDPOINT_PORT_EDS; } ServerPort = (ServerPort.compare("0") == 0) ? to_string(endpointPort) : ServerPort; if (endpointPort == ENDPOINT_PORT_PIWEB_API) { // Use SendFullStructure ? 
m_sendFullStructure = stringToBool(config->getValue("SendFullStructure")); } else { m_sendFullStructure = true; } m_tracingEnabled = stringToBool(config->getValue("EnableTracing")); unsigned int retrySleepTime = atoi(config->getValue("OMFRetrySleepTime").c_str()); unsigned int maxRetry = atoi(config->getValue("OMFMaxRetry").c_str()); unsigned int timeout = atoi(config->getValue("OMFHttpTimeout").c_str()); string producerToken = config->getValue("producerToken"); string formatNumber = config->getValue("formatNumber"); string formatInteger = config->getValue("formatInteger"); string DefaultAFLocation = config->getValue("DefaultAFLocation"); string AFMap = config->getValue("AFMap"); string PIWebAPIAuthMethod = config->getValue("PIWebAPIAuthenticationMethod"); string PIWebAPIUserId = config->getValue("PIWebAPIUserId"); string PIWebAPIPassword = config->getValue("PIWebAPIPassword"); string KerberosKeytabFileName = config->getValue("PIWebAPIKerberosKeytabFileName"); // OCS configurations string OCSNamespace = config->getValue("OCSNamespace"); string OCSTenantId = config->getValue("OCSTenantId"); string OCSClientId = config->getValue("OCSClientId"); string OCSClientSecret = config->getValue("OCSClientSecret"); StringReplace(url, "HOST_PLACEHOLDER", ServerHostname); StringReplace(url, "PORT_PLACEHOLDER", ServerPort); // TENANT_ID_PLACEHOLDER and NAMESPACE_ID_PLACEHOLDER, if present, will be replaced with the values of OCSTenantId and OCSNamespace StringReplace(url, "TENANT_ID_PLACEHOLDER", OCSTenantId); StringReplace(url, "NAMESPACE_ID_PLACEHOLDER", OCSNamespace); /** * Extract host, port, path from URL */ size_t findProtocol = url.find_first_of(":"); string protocol = url.substr(0, findProtocol); string tmpUrl = url.substr(findProtocol + 3); size_t findPort = tmpUrl.find_first_of(":"); string hostName = tmpUrl.substr(0, findPort); size_t findPath = tmpUrl.find_first_of("/"); string port = tmpUrl.substr(findPort + 1, findPath - findPort - 1); string path = 
tmpUrl.substr(findPath); string hostAndPort(hostName + ":" + port); // Set configuration fields m_protocol = protocol; m_hostAndPort = hostAndPort; m_path = path; m_retrySleepTime = retrySleepTime; m_maxRetry = maxRetry; m_timeout = timeout; m_typeId = TYPE_ID_DEFAULT; m_producerToken = producerToken; m_formatNumber = formatNumber; m_formatInteger = formatInteger; m_DefaultAFLocation = DefaultAFLocation; m_AFMap = AFMap; // OCS configurations m_OCSNamespace = OCSNamespace; m_OCSTenantId = OCSTenantId; m_OCSClientId = OCSClientId; m_OCSClientSecret = OCSClientSecret; // PI Web API end-point - evaluates the authentication method requested if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { if (PIWebAPIAuthMethod.compare("anonymous") == 0) { Logger::getLogger()->debug("PI Web API end-point - anonymous authentication"); m_PIWebAPIAuthMethod = "a"; } else if (PIWebAPIAuthMethod.compare("basic") == 0) { Logger::getLogger()->debug("PI Web API end-point - basic authentication"); m_PIWebAPIAuthMethod = "b"; m_PIWebAPICredentials = AuthBasicCredentialsGenerate(PIWebAPIUserId, PIWebAPIPassword); } else if (PIWebAPIAuthMethod.compare("kerberos") == 0) { Logger::getLogger()->debug("PI Web API end-point - kerberos authentication"); m_PIWebAPIAuthMethod = "k"; AuthKerberosSetup(m_KerberosKeytab, KerberosKeytabFileName); } else { Logger::getLogger()->error("Invalid authentication method for PI Web API :%s: ", PIWebAPIAuthMethod.c_str()); } } else { // For all other endpoint types, set PI Web API authentication to 'anonymous.' // This prevents the HttpSender from inserting PI Web API authentication headers. m_PIWebAPIAuthMethod = "a"; } // Use compression ? 
string compr = config->getValue("compression"); if (compr == "True" || compr == "true" || compr == "TRUE") m_compression = true; else m_compression = false; // Set the list of errors considered not blocking in the communication // with the PI Server if (m_PIServerEndpoint == ENDPOINT_PIWEB_API) { JSONStringToVectorString(m_notBlockingErrors, config->getValue("PIWebAPInotBlockingErrors"), string("EventInfo")); } else { JSONStringToVectorString(m_notBlockingErrors, config->getValue("notBlockingErrors"), string("errors400")); } /** * Add static data * Split the string up into each pair */ string staticData = config->getValue("StaticData"); size_t pos = 0; size_t start = 0; do { pos = staticData.find(",", start); string item = staticData.substr(start, pos - start); start = pos + 1; size_t pos2 = 0; if ((pos2 = item.find(":")) != string::npos) { string name = item.substr(0, pos2); while (name[0] == ' ') name = name.substr(1); string value = item.substr(pos2 + 1); while (value[0] == ' ') value = value.substr(1); if (!name.empty() && !value.empty()) { pair<string, string> sData = make_pair(name, value); m_staticData.push_back(sData); } } } while (pos != string::npos); // Set Asset/Datapoint data stream name delimiter m_delimiter = config->getValue("AssetDatapointNameDelimiter"); if (m_delimiter.empty()) { // Delimiter can't be empty. If the user has cleared it, set it to the default. m_delimiter = "."; } else { StringTrim(m_delimiter); if (m_delimiter.empty()) { // If trimming emptied the string, the delimiter is a blank which is legal m_delimiter = " "; } else { // Delimiter must be a single character m_delimiter.resize(1); } } // Set the Action Code for OMF Data posts: update or create m_dataActionCode = config->itemExists("OMFDataActionCode") ? 
config->getValue("OMFDataActionCode") : "update"; { // NamingScheme handling if(NamingScheme.compare("Concise") == 0) { m_NamingScheme = NAMINGSCHEME_CONCISE; } else if(NamingScheme.compare("Use Type Suffix") == 0) { m_NamingScheme = NAMINGSCHEME_SUFFIX; } else if(NamingScheme.compare("Use Attribute Hash") == 0) { m_NamingScheme = NAMINGSCHEME_HASH; } else if(NamingScheme.compare("Backward compatibility") == 0) { m_NamingScheme = NAMINGSCHEME_COMPATIBILITY; } Logger::getLogger()->debug("End point naming scheme :%s: ", NamingScheme.c_str() ); } // Fetch legacy OMF type option string legacy = config->getValue("Legacy"); if (legacy == "True" || legacy == "true" || legacy == "TRUE") m_legacy = true; else m_legacy = false; // Enable or disable OMF tracing based on the current configuration handleOMFTracing(); } /** * @brief Handles the enabling and configuration of OMF tracing. * * If OMF tracing is enabled, this function checks if the trace file exists. * If it does not exist, the file is created in write mode. If the file * exists but is read-only, the function changes the file permissions to * allow writing. If OMF tracing is disabled, it checks if the trace file * exists and has write permissions, and if so, sets it to read-only. 
*/ void OMFInformation::handleOMFTracing() { std::string filename = HttpSender::getOMFTracePath(); // Retrieve the trace file path if (m_tracingEnabled) { if(!HttpSender::createDebugTraceDirectory()) { return; } // Check if the trace file exists std::ifstream fileCheck(filename.c_str()); if (!fileCheck) { // File does not exist, create it in write mode std::ofstream traceFile(filename.c_str(), std::ofstream::out); if (!traceFile) { Logger::getLogger()->error("Unable to create trace file: %s", filename.c_str()); } } else { // File exists, check if it is read-only struct stat fileStat; if (stat(filename.c_str(), &fileStat) == 0) { // Check if the file is read-only if (!(fileStat.st_mode & S_IWUSR)) { // Change the file permissions to allow writing if (chmod(filename.c_str(), fileStat.st_mode | S_IWUSR) != 0) { Logger::getLogger()->error("Unable to set write permissions for: %s", filename.c_str()); } } } } } else { // Check if the trace file exists before attempting to make it read-only if (access(filename.c_str(), F_OK) == 0) { // Check if the file has write permissions struct stat fileStat; if (stat(filename.c_str(), &fileStat) == 0) { // If the file has write permission, change it to read-only if (fileStat.st_mode & S_IWUSR) { if (chmod(filename.c_str(), fileStat.st_mode & ~S_IWUSR) != 0) { Logger::getLogger()->error("Unable to set read-only permissions for: %s", filename.c_str()); } } } } } } /** * Destructor for the OMFInformation class. 
 */
OMFInformation::~OMFInformation()
{
	// Safe even if start() was never called; these are NULL-initialised
	delete m_sender;
	delete m_omf;
	delete m_ocs;
	// TODO cleanup the allocated member variables
}

/**
 * The plugin start entry point has been called
 *
 * Restores persisted state (type-id and sent data types), allocates the
 * HTTP sender appropriate to the configured authentication and protocol,
 * probes the destination data archive for its version and finally creates
 * the OMF protocol handler and, for cloud endpoints, the OCS authenticator.
 *
 * @param storedData	The data that has been persisted by a previous execution
 *			of the plugin
 */
void OMFInformation::start(const string& storedData)
{
	m_logger->info("Host: %s", m_hostAndPort.c_str());
	if ((m_PIServerEndpoint == ENDPOINT_OCS) || (m_PIServerEndpoint == ENDPOINT_ADH))
	{
		m_logger->info("Namespace: %s", m_OCSNamespace.c_str());
	}

	// Parse JSON plugin_data
	Document JSONData;
	JSONData.Parse(storedData.c_str());
	if (JSONData.HasParseError())
	{
		m_logger->error("%s plugin error: failure parsing "
				"plugin data JSON object '%s'",
				PLUGIN_NAME,
				storedData.c_str());
	}
	else if (JSONData.HasMember(TYPE_ID_KEY) &&
		(JSONData[TYPE_ID_KEY].IsString() ||
		 JSONData[TYPE_ID_KEY].IsNumber()))
	{
		// Update type-id in PLUGIN_HANDLE object
		// The persisted value may be either a number or a numeric string
		if (JSONData[TYPE_ID_KEY].IsNumber())
		{
			m_typeId = JSONData[TYPE_ID_KEY].GetInt();
		}
		else
		{
			m_typeId = atol(JSONData[TYPE_ID_KEY].GetString());
		}
	}

	// Check if the configured Asset/Datapoint delimiter is legal in OMF which uses PI and AF rules
	bool changed = false;
	OMF::ApplyPIServerNamingRulesInvalidChars(m_delimiter, &changed);
	if (changed)
	{
		m_logger->error("Asset/Datapoint name delimiter '%s' is not legal in OMF", m_delimiter.c_str());
	}
	else
	{
		m_logger->info("Asset/Datapoint name delimiter set to '%s'", m_delimiter.c_str());
	}

	// Load sentdataTypes
	loadSentDataTypes(JSONData);

	// Log default type-id
	if (m_assetsDataTypes.size() == 1 &&
	    m_assetsDataTypes.find(FAKE_ASSET_KEY) != m_assetsDataTypes.end())
	{
		// Only one value: we have the FAKE_ASSET_KEY and no other data
		Logger::getLogger()->info("%s plugin is using global OMF prefix %s=%d",
					  PLUGIN_NAME,
					  TYPE_ID_KEY,
					  m_typeId);
	}
	else
	{
		Logger::getLogger()->info("%s plugin is using per asset OMF prefix %s=%d "
					  "(max value found)",
					  PLUGIN_NAME,
					  TYPE_ID_KEY,
					  getMaxTypeId());
	}

	// Allocate an HttpSender subclass to communicate with PI Web API with selected authorization
	if (!m_sender)
	{
		/**
		 * Select the transport library based on the authentication method and transport encryption
		 * requirements.
		 *
		 * LibcurlHttps is used to integrate Kerberos as the SimpleHttp does not support it
		 * the Libcurl integration implements only HTTPS not HTTP currently. We use SimpleHttp or
		 * SimpleHttps, as appropriate for the URL given, if not using Kerberos
		 *
		 *
		 * The handler is allocated using "Hostname : port", connect_timeout and request_timeout.
		 * Default is no timeout
		 */
		if (m_PIWebAPIAuthMethod.compare("k") == 0)
		{
			m_sender = new LibcurlHttps(m_hostAndPort,
						    m_timeout,
						    m_timeout,
						    m_retrySleepTime,
						    m_maxRetry);
		}
		else
		{
			if (m_protocol.compare("http") == 0)
			{
				m_sender = new SimpleHttp(m_hostAndPort,
							  m_timeout,
							  m_timeout,
							  m_retrySleepTime,
							  m_maxRetry);
			}
			else
			{
				m_sender = new SimpleHttps(m_hostAndPort,
							   m_timeout,
							   m_timeout,
							   m_retrySleepTime,
							   m_maxRetry);
			}
		}

		m_sender->setAuthMethod          (m_PIWebAPIAuthMethod);
		m_sender->setAuthBasicCredentials(m_PIWebAPICredentials);

		// OCS configurations
		m_sender->setOCSNamespace        (m_OCSNamespace);
		m_sender->setOCSTenantId         (m_OCSTenantId);
		m_sender->setOCSClientId         (m_OCSClientId);
		m_sender->setOCSClientSecret     (m_OCSClientSecret);
	}

	// Retrieve the destination data archive version
	// Only PI Web API and EDS expose a version endpoint; the other
	// endpoint types just set the OMF version without a probe
	m_connected = true;
	int httpCode = 200;
	switch (m_PIServerEndpoint)
	{
	case ENDPOINT_PIWEB_API:
		httpCode = PIWebAPIGetVersion();
		if (httpCode >= 200 && httpCode < 400)
		{
			SetOMFVersion();
			CheckDataActionCode();
			Logger::getLogger()->info("%s connected to %s OMF Version: %s",
						  m_RestServerVersion.c_str(),
						  m_hostAndPort.c_str(),
						  m_omfversion.c_str());
			m_connected = true;
		}
		else
		{
			Logger::getLogger()->error("The PI Web API service %s is not available. HTTP Code: %d",
						   m_hostAndPort.c_str(),
						   httpCode);
			m_connected = false;
		}
		break;
	case ENDPOINT_EDS:
		httpCode = EDSGetVersion();
		if (httpCode >= 200 && httpCode < 400)
		{
			SetOMFVersion();
			CheckDataActionCode();
			Logger::getLogger()->info("Edge Data Store %s OMF Version: %s",
						  m_RestServerVersion.c_str(),
						  m_omfversion.c_str());
			m_connected = true;
		}
		else
		{
			Logger::getLogger()->error("Edge Data Store %s is not available. HTTP Code: %d",
						   m_hostAndPort.c_str(),
						   httpCode);
			m_connected = false;
		}
		break;
	case ENDPOINT_OCS:
		SetOMFVersion();
		CheckDataActionCode();
		Logger::getLogger()->info("OSIsoft Cloud Services OMF Version: %s",
					  m_omfversion.c_str());
		break;
	case ENDPOINT_ADH:
		SetOMFVersion();
		CheckDataActionCode();
		Logger::getLogger()->info("AVEVA Data Hub OMF Version: %s",
					  m_omfversion.c_str());
		break;
	case ENDPOINT_CR:
		SetOMFVersion();
		CheckDataActionCode();
		Logger::getLogger()->info("Connector Relay OMF Version: %s",
					  m_omfversion.c_str());
		break;
	default:
		SetOMFVersion();
		CheckDataActionCode();
		Logger::getLogger()->info("OMF Version: %s",
					  m_omfversion.c_str());
		break;
	}

	// Allocate the OMF class that implements the PI Server data protocol
	if (!m_omf)
	{
		m_omf = new OMF(m_name, *m_sender, m_path, m_assetsDataTypes, m_producerToken);

		m_omf->setSendFullStructure(m_sendFullStructure);
		m_omf->setDelimiter(m_delimiter);

		// Set PIServerEndpoint configuration
		m_omf->setNamingScheme(m_NamingScheme);
		m_omf->setPIServerEndpoint(m_PIServerEndpoint);
		m_omf->setDefaultAFLocation(m_DefaultAFLocation);
		m_omf->setAFMap(m_AFMap);

		// Generates the prefix to have unique asset_id across different levels of hierarchies
		string AFHierarchyLevel;
		m_omf->generateAFHierarchyPrefixLevel(m_DefaultAFLocation, m_prefixAFAsset, AFHierarchyLevel);
		m_omf->setPrefixAFAsset(m_prefixAFAsset);

		// Set OMF FormatTypes
		m_omf->setFormatType(OMF_TYPE_FLOAT, m_formatNumber);
		m_omf->setFormatType(OMF_TYPE_INTEGER, m_formatInteger);

		m_omf->setStaticData(&m_staticData);
		m_omf->setNotBlockingErrors(m_notBlockingErrors);

		// OMF 1.0/1.1 can only represent the old style complex types
		if (m_omfversion == "1.1" || m_omfversion == "1.0")
		{
			Logger::getLogger()->info("Setting LegacyType to be true for OMF Version '%s'. This will force use old style complex types. ", m_omfversion.c_str());
			m_omf->setLegacyMode(true);
		}
		else
		{
			m_omf->setLegacyMode(m_legacy);
		}
	}

	// Allocate the OCS class that implements ADH and OCS authentication
	if (!m_ocs)
	{
		if ((m_PIServerEndpoint == ENDPOINT_ADH) || (m_PIServerEndpoint == ENDPOINT_OCS))
		{
			m_ocs = new OCS(m_authUrl);
		}
	}
}

/**
 * Send data to the OMF endpoint
 *
 * @param readings	The block of readings to send
 * @return uint32_t	The number of readings sent
 */
uint32_t OMFInformation::send(const vector<Reading *>& readings)
{
#if INSTRUMENT
	struct timeval startTime;
	gettimeofday(&startTime, NULL);
#endif
	// Check if the destination data archive is available
	if (!IsDataArchiveConnected())
	{
		// Error already reported by IsDataArchiveConnected
		return 0;
	}

	// For OCS and ADH, retrieve the authentication token
	if (m_ocs)
	{
		std::string token = m_ocs->OCSRetrieveAuthToken(m_OCSClientId, m_OCSClientSecret);
		if (!token.empty())
		{
			m_OCSToken = token;
			m_sender->setOCSToken(token);
		}
	}

	// Exit immediately if the plugin is not stable due to PI Server errors
	if (!m_omf->isPIstable())
	{
		return 0;
	}

	// Send the readings data to the PI Server
	m_omf->setOMFVersion(m_omfversion);
	m_omf->setDataActionCode(m_dataActionCode);
	m_omf->setPIconnected(m_connected);
	m_omf->setNumBlocks(m_numBlocks);
	uint32_t ret = m_omf->sendToServer(readings, m_compression);
	// Pick up connection state changes detected during the send
	m_connected = m_omf->isPIconnected();
	m_numBlocks = m_omf->getNumBlocks();

	// Detect typeId change in OMF class
	if (m_omf->getTypeId() != m_typeId)
	{
		// Update typeId in plugin handle
		m_typeId = m_omf->getTypeId();
		// Log change
		Logger::getLogger()->info("%s plugin: a new OMF global %s (%d) has been created.",
					  PLUGIN_NAME,
					  TYPE_ID_KEY,
					  m_typeId);
	}
#if INSTRUMENT
	Logger::getLogger()->debug("plugin_send elapsed time: %6.3f seconds, NumValues: %u",
				   GetElapsedTime(&startTime),
				   ret);
#endif
	// Return sent data ret code
	return ret;
}

/**
 * Return the data to be persisted
 *
 * @return string	The data to persist (a JSON document holding either
 *			the sent data types or just the current type-id)
 */
string OMFInformation::saveData()
{
#if INSTRUMENT
	struct timeval startTime;
	gettimeofday(&startTime, NULL);
#endif
	// Create save data
	std::ostringstream saveData;
	saveData << "{";

	// Add sent data types
	string typesData = saveSentDataTypes();
	if (!typesData.empty())
	{
		// Save datatypes
		saveData << typesData;
	}
	else
	{
		// Just save type-id
		saveData << "\"" << TYPE_ID_KEY << "\": " << to_string(m_typeId);
	}

	saveData << "}";

	// Log saving the plugin configuration
	Logger::getLogger()->debug("%s plugin: saving plugin_data '%s'",
				   PLUGIN_NAME,
				   saveData.str().c_str());
#if INSTRUMENT
	// For debugging: write plugin's JSON data to a file
	string jsonFilePath = getDataDir() + string("/logs/OMFSaveData.json");
	ofstream f(jsonFilePath.c_str(), ios_base::trunc);
	f << saveData.str();
	f.close();

	Logger::getLogger()->debug("plugin_shutdown elapsed time: %6.3f seconds",
				   GetElapsedTime(&startTime));
#endif
	// Return current plugin data to save
	return saveData.str();
}

/**
 * Load stored data types (already sent to PI server)
 *
 * Each element, the assetName, has type-id and datatype for each datapoint
 *
 * If no data exists in the plugin_data table, then a map entry
 * with FAKE_ASSET_KEY is made in order to set the start type-id
 * sequence with default value set to 1:
 * all new created OMF dataTypes have type-id prefix set to the value of 1.
 *
 * If data like {"type-id": 14} or {"type-id": "14" } is found, a map entry
 * with FAKE_ASSET_KEY is made and the start type-id sequence value is set
 * to the found value, i.e. 14:
 * all new created OMF dataTypes have type-id prefix set to the value of 14.
 *
 * If proper per asset types data is loaded, the FAKE_ASSET_KEY is not set:
 * all new created OMF dataTypes have type-id prefix set to the value of 1
 * while existing (loaded) OMF dataTypes will keep their type-id values. 
* * @param JSONData The JSON document containing all saved data */ void OMFInformation::loadSentDataTypes(Document& JSONData) { if (JSONData.HasMember(SENT_TYPES_KEY) && JSONData[SENT_TYPES_KEY].IsArray()) { const Value& cachedTypes = JSONData[SENT_TYPES_KEY]; for (Value::ConstValueIterator it = cachedTypes.Begin(); it != cachedTypes.End(); ++it) { if (!it->IsObject()) { Logger::getLogger()->warn("%s plugin: current element in '%s' " \ "property is not an object, ignoring it", PLUGIN_NAME, SENT_TYPES_KEY); continue; } for (Value::ConstMemberIterator itr = it->MemberBegin(); itr != it->MemberEnd(); ++itr) { string key = itr->name.GetString(); const Value& cachedValue = itr->value; // Add typeId and dataTypes to the in memory cache long typeId; if (cachedValue.HasMember(TYPE_ID_KEY) && cachedValue[TYPE_ID_KEY].IsNumber()) { typeId = cachedValue[TYPE_ID_KEY].GetInt(); } else { Logger::getLogger()->warn("%s plugin: current element '%s'" \ " doesn't have '%s' property, ignoring it", PLUGIN_NAME, key.c_str(), TYPE_ID_KEY); continue; } long NamingScheme; if (cachedValue.HasMember(NAMING_SCHEME) && cachedValue[NAMING_SCHEME].IsNumber()) { NamingScheme = cachedValue[NAMING_SCHEME].GetInt(); } else { Logger::getLogger()->warn("%s plugin: current element '%s'" \ " doesn't have '%s' property, handling naming scheme in compatibility mode", PLUGIN_NAME, key.c_str(), NAMING_SCHEME); NamingScheme = NAMINGSCHEME_COMPATIBILITY; } string AFHHash; if (cachedValue.HasMember(AFH_HASH) && cachedValue[AFH_HASH].IsString()) { AFHHash = cachedValue[AFH_HASH].GetString(); } else { Logger::getLogger()->warn("%s plugin: current element '%s'" \ " doesn't have '%s' property", PLUGIN_NAME, key.c_str(), AFH_HASH); AFHHash = ""; } string AFHierarchy; if (cachedValue.HasMember(AF_HIERARCHY) && cachedValue[AF_HIERARCHY].IsString()) { AFHierarchy = cachedValue[AF_HIERARCHY].GetString(); } else { Logger::getLogger()->warn("%s plugin: current element '%s'" \ " doesn't have '%s' property", PLUGIN_NAME, 
key.c_str(), AF_HIERARCHY); AFHierarchy = ""; } string AFHierarchyOrig; if (cachedValue.HasMember(AF_HIERARCHY_ORIG) && cachedValue[AF_HIERARCHY_ORIG].IsString()) { AFHierarchyOrig = cachedValue[AF_HIERARCHY_ORIG].GetString(); } else { Logger::getLogger()->warn("%s plugin: current element '%s'" \ " doesn't have '%s' property", PLUGIN_NAME, key.c_str(), AF_HIERARCHY_ORIG); AFHierarchyOrig = ""; } string dataTypes; if (cachedValue.HasMember(DATA_KEY) && cachedValue[DATA_KEY].IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); const Value& types = cachedValue[DATA_KEY]; types.Accept(writer); dataTypes = buffer.GetString(); } else { Logger::getLogger()->warn("%s plugin: current element '%s'" \ " doesn't have '%s' property, ignoring it", PLUGIN_NAME, key.c_str(), DATA_KEY); continue; } unsigned long dataTypesShort; if (cachedValue.HasMember(DATA_KEY_SHORT) && cachedValue[DATA_KEY_SHORT].IsString()) { string strDataTypesShort = cachedValue[DATA_KEY_SHORT].GetString(); // The information are stored as string in hexadecimal format dataTypesShort = stoi (strDataTypesShort,nullptr,16); } else { dataTypesShort = calcTypeShort(dataTypes); if (dataTypesShort == 0) { Logger::getLogger()->warn("%s plugin: current element '%s'" \ " doesn't have '%s' property", PLUGIN_NAME, key.c_str(), DATA_KEY_SHORT); } else { Logger::getLogger()->warn("%s plugin: current element '%s'" \ " doesn't have '%s' property, calculated '0x%X'", PLUGIN_NAME, key.c_str(), DATA_KEY_SHORT, dataTypesShort); } } unsigned short hintChecksum = 0; if (cachedValue.HasMember(DATA_KEY_HINT) && cachedValue[DATA_KEY_HINT].IsString()) { string strHint = cachedValue[DATA_KEY_HINT].GetString(); // The information are stored as string in hexadecimal format hintChecksum = stoi (strHint,nullptr,16); } OMFDataTypes dataType; dataType.typeId = typeId; dataType.types = dataTypes; dataType.typesShort = dataTypesShort; dataType.hintChkSum = hintChecksum; dataType.namingScheme = NamingScheme; dataType.afhHash 
= AFHHash; dataType.afHierarchy = AFHierarchy; dataType.afHierarchyOrig = AFHierarchyOrig; Logger::getLogger()->debug("%s - AFHHash :%s: AFHierarchy :%s: AFHierarchyOrig :%s: ", __FUNCTION__, AFHHash.c_str(), AFHierarchy.c_str() , AFHierarchyOrig.c_str() ); Logger::getLogger()->debug("%s - NamingScheme :%ld: ", __FUNCTION__,NamingScheme ); // Add data into the map m_assetsDataTypes[key] = dataType; } } } else { // There is no stored data when plugin starts first time if (JSONData.MemberBegin() != JSONData.MemberEnd()) { Logger::getLogger()->warn("Persisted data is not of the correct format, ignoring"); } OMFDataTypes dataType; dataType.typeId = m_typeId; dataType.types = "{}"; // Add default data into the map m_assetsDataTypes[FAKE_ASSET_KEY] = dataType; } } /** * Return the maximum value of type-id, among all entries in the map * * If the array is empty the m_typeId is returned. * * @return The maximum value of type-id found */ long OMFInformation::getMaxTypeId() { long maxId = m_typeId; for (auto it = m_assetsDataTypes.begin(); it != m_assetsDataTypes.end(); ++it) { if ((*it).second.typeId > maxId) { maxId = (*it).second.typeId; } } return maxId; } /** * Calls the PI Web API system information endpoint to get the product version * * @param logMessage If true, log error messages (default: true) * @return HttpCode REST response code */ int OMFInformation::PIWebAPIGetVersion(bool logMessage) { int res = 400; unsigned int retries = m_sender->getMaxRetries(); m_sender->setMaxRetries(0); try { string path = "https://" + m_hostAndPort + "/piwebapi/system"; vector<pair<string, string>> headers; headers.push_back( std::make_pair("Accept", "application/json")); m_RestServerVersion.clear(); res = m_sender->sendRequest("GET", path, headers, std::string("")); if (res >= 200 && res <= 299) { PIWebAPI piwebapi; m_RestServerVersion = piwebapi.ExtractVersion(m_sender->getHTTPResponse()); } } catch (const BadRequest &ex) { if (logMessage) { Logger::getLogger()->error("PI Web API 
system information BadRequest exception: %s", ex.what());
		}
		res = 400;
	}
	catch (const Unauthorized &e)
	{
		// HTTP 401: credentials rejected by the endpoint
		if (logMessage)
		{
			Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
		}
		res = 401;
	}
	catch (const std::exception &ex)
	{
		if (logMessage)
		{
			Logger::getLogger()->error("PI Web API system information exception: %s", ex.what());
		}
		res = 400;
	}
	catch (...)
	{
		if (logMessage)
		{
			Logger::getLogger()->error("PI Web API system information generic exception");
		}
		res = 400;
	}
	// Restore the caller's retry configuration before returning
	m_sender->setMaxRetries(retries);
	return res;
}

/**
 * Calls the Edge Data Store product information endpoint to get the EDS version
 *
 * @param logMessage	If true, log error messages (default: true)
 * @return		HttpCode	REST response code
 */
int OMFInformation::EDSGetVersion(bool logMessage)
{
	int res = 400;
	// Disable retries for this lightweight probe; restored before returning
	unsigned int retries = m_sender->getMaxRetries();
	m_sender->setMaxRetries(0);
	try
	{
		string path = "http://" + m_hostAndPort + "/api/v1/diagnostics/productinformation";
		vector<pair<string, string>> headers;
		headers.push_back( std::make_pair("Accept", "application/json"));
		m_RestServerVersion.clear();
		res = m_sender->sendRequest("GET", path, headers, std::string(""));
		if (res >= 200 && res <= 299)
		{
			// Parse the product version out of the REST response body
			m_RestServerVersion = ParseEDSProductInformation(m_sender->getHTTPResponse());
		}
	}
	catch (const BadRequest &ex)
	{
		if (logMessage)
		{
			Logger::getLogger()->error("Edge Data Store productinformation BadRequest exception: %s", ex.what());
		}
		res = 400;
	}
	catch (const Unauthorized &e)
	{
		if (logMessage)
		{
			Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
		}
		res = 401;
	}
	catch (const std::exception &ex)
	{
		if (logMessage)
		{
			Logger::getLogger()->error("Edge Data Store productinformation exception: %s", ex.what());
		}
		res = 400;
	}
	catch (...)
	{
		if (logMessage)
		{
			Logger::getLogger()->error("Edge Data Store productinformation generic exception");
		}
		res = 400;
	}
	// Restore the caller's retry configuration before returning
	m_sender->setMaxRetries(retries);
	return res;
}

/**
 * Calls the ADH Namespace identity endpoint to check the connection to ADH
 *
 * @param logMessage	If true, log error messages (default: true)
 * @return		HttpCode	REST response code
 */
int OMFInformation::IsADHConnected(bool logMessage)
{
	// Lazily fetch an OCS/ADH auth token if we do not hold one yet
	if (m_OCSToken.empty())
	{
		std::string token = m_ocs->OCSRetrieveAuthToken(m_OCSClientId, m_OCSClientSecret, false);
		if (!token.empty())
		{
			m_OCSToken = token;
			m_sender->setOCSToken(token);
		}
	}

	int res = 400;
	// Disable retries for this lightweight probe; restored before returning
	unsigned int retries = m_sender->getMaxRetries();
	m_sender->setMaxRetries(0);
	try
	{
		string path = m_path;
		path.resize(path.size() - 4);	// remove trailing "/omf"
		vector<pair<string, string>> headers;
		headers.push_back( std::make_pair("Accept", "application/json"));
		res = m_sender->sendRequest("GET", path, headers, std::string(""));
	}
	catch (const BadRequest &ex)
	{
		if (logMessage)
		{
			Logger::getLogger()->error("AVEVA Data Hub health check BadRequest exception: %s", ex.what());
		}
		res = 400;
	}
	catch (const Unauthorized &e)
	{
		// HTTP 401: Land here if the ADH or OCS Token has expired
		if (logMessage)
		{
			Logger::getLogger()->error(MESSAGE_UNAUTHORIZED);
		}
		res = 401;
	}
	catch (const std::exception &ex)
	{
		if (logMessage)
		{
			Logger::getLogger()->error("AVEVA Data Hub health check exception: %s", ex.what());
		}
		res = 400;
	}
	catch (...)
{ if (logMessage) { Logger::getLogger()->error("AVEVA Data Hub health check generic exception"); } res = 400; } m_sender->setMaxRetries(retries); return res; } /** * Set the supported OMF Version for the OMF endpoint */ void OMFInformation::SetOMFVersion() { switch (m_PIServerEndpoint) { case ENDPOINT_PIWEB_API: if (m_RestServerVersion.find("2019") != std::string::npos) { m_omfversion = "1.0"; } else if (m_RestServerVersion.find("2020") != std::string::npos) { m_omfversion = "1.1"; } else if (m_RestServerVersion.find("2021") != std::string::npos) { m_omfversion = "1.2"; } else { m_omfversion = "1.2"; } break; case ENDPOINT_EDS: // Edge Data Store versions with supported OMF versions: // EDS 2020 (1.0.0.609) OMF 1.0, 1.1 // EDS 2023 (1.1.1.46) OMF 1.0, 1.1, 1.2 // EDS 2023 Patch 1 (1.1.3.2) OMF 1.0, 1.1, 1.2 { int major = 0; int minor = 0; ParseProductVersion(m_RestServerVersion, &major, &minor); if ((major > 1) || (major == 1 && minor > 0)) { m_omfversion = "1.2"; } else { m_omfversion = EDS_OMF_VERSION; } } break; case ENDPOINT_CR: m_omfversion = CR_OMF_VERSION; break; case ENDPOINT_OCS: case ENDPOINT_ADH: default: m_omfversion = "1.2"; // assume cloud service OMF endpoint types support OMF 1.2 break; } } /** * Check the Action code for OMF Data messages. * This method changes the Action code only if 'update' is specified for an OMF version too old to support it. */ void OMFInformation::CheckDataActionCode() { if (!m_omfversion.empty()) { if ((m_omfversion.compare("1.2") != 0) && (m_dataActionCode.compare("update") == 0)) { Logger::getLogger()->warn("OMF Version %s does not support Data Action Code %s; setting to 'create'", m_omfversion.c_str(), m_dataActionCode.c_str()); m_dataActionCode = "create"; } } } /** * Evaluate if the endpoint is a PI Web API or a Connector Relay. 
* * @return OMF_ENDPOINT values */ OMF_ENDPOINT OMFInformation::identifyPIServerEndpoint() { OMF_ENDPOINT PIServerEndpoint; HttpSender *endPoint; vector<pair<string, string>> header; int httpCode; if (m_PIWebAPIAuthMethod.compare("k") == 0) { endPoint = new LibcurlHttps(m_hostAndPort, m_timeout, m_timeout, m_retrySleepTime, m_maxRetry); } else { endPoint = new SimpleHttps(m_hostAndPort, m_timeout, m_timeout, m_retrySleepTime, m_maxRetry); } // Set requested authentication endPoint->setAuthMethod (m_PIWebAPIAuthMethod); endPoint->setAuthBasicCredentials(m_PIWebAPICredentials); try { httpCode = endPoint->sendRequest("GET", m_path, header, ""); if (httpCode >= 200 && httpCode <= 399) { PIServerEndpoint = ENDPOINT_PIWEB_API; if (m_PIWebAPIAuthMethod == "b") Logger::getLogger()->debug("PI Web API end-point basic authorization granted"); } else { PIServerEndpoint = ENDPOINT_CR; } } catch (exception &ex) { Logger::getLogger()->warn("PI-Server end-point discovery encountered the error :%s: " "trying selecting the Connector Relay as an end-point", ex.what()); PIServerEndpoint = ENDPOINT_CR; } delete endPoint; return (PIServerEndpoint); } /** * Return a JSON string with the dataTypes to save in plugin_data * * Note: the entry with FAKE_ASSET_KEY is never saved. * * @return The string with JSON data */ string OMFInformation::saveSentDataTypes() { string ret; std::ostringstream newData; auto it = m_assetsDataTypes.find(FAKE_ASSET_KEY); if (it != m_assetsDataTypes.end()) { // Set typeId in FAKE_ASSET_KEY m_typeId = (*it).second.typeId; // Remove the entry m_assetsDataTypes.erase(it); } unsigned long tSize = m_assetsDataTypes.size(); if (tSize) { // Prepare output data (skip empty data types) newData << "\"" << SENT_TYPES_KEY << "\" : ["; bool pendingSeparator = false; for (auto it = m_assetsDataTypes.begin(); it != m_assetsDataTypes.end(); ++it) { if (((*it).second).types.compare("{}") != 0) { newData << (pendingSeparator ? 
", " : ""); newData << "{\"" << (*it).first << "\" : {\"" << TYPE_ID_KEY << "\": " << to_string(((*it).second).typeId); // The information should be stored as string in hexadecimal format std::stringstream tmpStream; tmpStream << std::hex << ((*it).second).typesShort; std::string typesShort = tmpStream.str(); newData << ", \"" << DATA_KEY_SHORT << "\": \"0x" << typesShort << "\""; std::stringstream hintStream; hintStream << std::hex << ((*it).second).hintChkSum; std::string hintChecksum = hintStream.str(); newData << ", \"" << DATA_KEY_HINT << "\": \"0x" << hintChecksum << "\""; long NamingScheme; NamingScheme = ((*it).second).namingScheme; newData << ", \"" << NAMING_SCHEME << "\": " << to_string(NamingScheme) << ""; string AFHHash; AFHHash = ((*it).second).afhHash; newData << ", \"" << AFH_HASH << "\": \"" << AFHHash << "\""; string AFHierarchy; AFHierarchy = ((*it).second).afHierarchy; newData << ", \"" << AF_HIERARCHY << "\": \"" << AFHierarchy << "\""; string AFHierarchyOrig; AFHierarchyOrig = ((*it).second).afHierarchyOrig; newData << ", \"" << AF_HIERARCHY_ORIG << "\": \"" << AFHierarchyOrig << "\""; Logger::getLogger()->debug("%s - AFHHash :%s: AFHierarchy :%s: AFHierarchyOrig :%s:", __FUNCTION__, AFHHash.c_str(), AFHierarchy.c_str(), AFHierarchyOrig.c_str() ); Logger::getLogger()->debug("%s - NamingScheme :%ld: ", __FUNCTION__,NamingScheme ); newData << ", \"" << DATA_KEY << "\": " << (((*it).second).types.empty() ? "{}" : ((*it).second).types) << "}}"; pendingSeparator = true; } } tSize = m_assetsDataTypes.size(); if (!tSize) { // DataTypes map is empty return ret; } newData << "]"; ret = newData.str(); } return ret; } /** * Calculate the TypeShort in the case it is missing loading type definition * * Generate a 64 bit number containing a set of counts, * number of datapoints in an asset and the number of datapoint of each type we support. 
* */ unsigned long OMFInformation::calcTypeShort(const string& dataTypes) { union t_typeCount { struct { unsigned char tTotal; unsigned char tFloat; unsigned char tString; unsigned char spare0; unsigned char spare1; unsigned char spare2; unsigned char spare3; unsigned char spare4; } cnt; unsigned long valueLong = 0; } typeCount; Document JSONData; JSONData.Parse(dataTypes.c_str()); if (JSONData.HasParseError()) { Logger::getLogger()->error("calcTypeShort - unable to calculate TypeShort on :%s: ", dataTypes.c_str()); return (0); } for (Value::ConstMemberIterator it = JSONData.MemberBegin(); it != JSONData.MemberEnd(); ++it) { string key = it->name.GetString(); const Value& value = it->value; if (value.HasMember(PROPERTY_TYPE) && value[PROPERTY_TYPE].IsString()) { string type =value[PROPERTY_TYPE].GetString(); // Integer is handled as float in the OMF integration if (type.compare(PROPERTY_NUMBER) == 0) { typeCount.cnt.tFloat++; } else if (type.compare(PROPERTY_STRING) == 0) { typeCount.cnt.tString++; } else { Logger::getLogger()->error("calcTypeShort - unrecognized type :%s: ", type.c_str()); } typeCount.cnt.tTotal++; } else { Logger::getLogger()->error("calcTypeShort - unable to extract the type for :%s: ", key.c_str()); return (0); } } return typeCount.valueLong; } /** * Finds major and minor product version numbers in a version string * * @param versionString Version string of the form x.x.x.x where x's are integers * @param major Major product version returned (first digit) * @param minor Minor product version returned (second digit) */ void OMFInformation::ParseProductVersion(std::string &versionString, int *major, int *minor) { *major = 0; *minor = 0; size_t last = 0; size_t next = versionString.find(".", last); if (next != string::npos) { *major = atoi(versionString.substr(last, next - last).c_str()); last = next + 1; next = versionString.find(".", last); if (next != string::npos) { *minor = atoi(versionString.substr(last, next - last).c_str()); } } } /** * 
Parses the Edge Data Store version string from the /productinformation REST response. * Note that the response format differs between EDS 2020 and EDS 2023. * * @param json REST response from /api/v1/diagnostics/productinformation * @return version Edge Data Store version string */ std::string OMFInformation::ParseEDSProductInformation(std::string json) { std::string version; Document doc; if (!doc.Parse(json.c_str()).HasParseError()) { try { if (doc.HasMember("Edge Data Store")) // EDS 2020 response { const rapidjson::Value &EDS = doc["Edge Data Store"]; version = EDS.GetString(); } else if (doc.HasMember("Product Version")) // EDS 2023 response { const rapidjson::Value &EDS = doc["Product Version"]; version = EDS.GetString(); } } catch (...) { } } Logger::getLogger()->debug("Edge Data Store Version: %s JSON: %s", version.c_str(), json.c_str()); return version; } /** * Generate the credentials for the basic authentication * encoding user id and password joined by a single colon (:) using base64 * * @param userId User id to be used for the generation of the credentials * @param password Password to be used for the generation of the credentials * @return credentials to be used with the basic authentication */ string OMFInformation::AuthBasicCredentialsGenerate(string& userId, string& password) { string Credentials; Credentials = Crypto::Base64::encode(userId + ":" + password); return (Credentials); } /** * Configures for Kerberos authentication : * - set the environment KRB5_CLIENT_KTNAME to the position containing the * Kerberos keys, the keytab file. 
* * @param out keytabEnv string containing the command to set the * KRB5_CLIENT_KTNAME environment variable * @param keytabFileName File name of the keytab file * */ void OMFInformation::AuthKerberosSetup(string& keytabEnv, string& keytabFileName) { string fledgeData = getDataDir (); string keytabFullPath = fledgeData + "/etc/kerberos" + "/" + keytabFileName; keytabEnv = "KRB5_CLIENT_KTNAME=" + keytabFullPath; putenv((char *) keytabEnv.c_str()); if (access(keytabFullPath.c_str(), F_OK) != 0) { Logger::getLogger()->error("Kerberos authentication not possible, the keytab file :%s: is missing.", keytabFullPath.c_str()); } } /** * Calculate elapsed time in seconds * * @param startTime Start time of the interval to be evaluated * @return Elapsed time in seconds */ double OMFInformation::GetElapsedTime(struct timeval *startTime) { struct timeval endTime, diff; gettimeofday(&endTime, NULL); timersub(&endTime, startTime, &diff); return diff.tv_sec + ((double)diff.tv_usec / 1000000); } /** * Check if the destination data archive is available by making a lightweight REST GET call every 60 seconds. * Log a message if the connection state changes. * First call to this method will make a REST call. * This method can check connectivity with PI Web API, Edge Data Store and AVEVA Data Hub. 
* * @return Connection status */ bool OMFInformation::IsDataArchiveConnected() { static std::chrono::steady_clock::time_point nextCheck(std::chrono::steady_clock::time_point::duration::zero()); static bool lastConnected = m_connected; // Previous value of m_connected std::chrono::steady_clock::time_point now = std::chrono::steady_clock::now(); if (now >= nextCheck) { int httpCode; switch (m_PIServerEndpoint) { case ENDPOINT_PIWEB_API: httpCode = PIWebAPIGetVersion(false); break; case ENDPOINT_EDS: httpCode = EDSGetVersion(false); break; case ENDPOINT_ADH: case ENDPOINT_OCS: httpCode = IsADHConnected(false); break; default: httpCode = 200; // assume all other endpoint types are connected break; } m_connected = ((httpCode < 200) || (httpCode >= 400)) ? false : true; Logger::getLogger()->debug("%s: Check %s HTTP Code: %d Connected: %s LastConnected: %s", __FUNCTION__, m_hostAndPort.c_str(), httpCode, m_connected ? "true" : "false", lastConnected ? "true" : "false"); // See if the connection status has changed since the last check. // If so, write a disconnection or reconnection message. if (m_connected == true) { SetOMFVersion(); CheckDataActionCode(); if (lastConnected == false) { switch (m_PIServerEndpoint) { case ENDPOINT_PIWEB_API: Logger::getLogger()->warn("%s reconnected to %s OMF Version: %s", m_RestServerVersion.c_str(), m_hostAndPort.c_str(), m_omfversion.c_str()); break; case ENDPOINT_EDS: Logger::getLogger()->warn("Edge Data Store %s reconnected to %s OMF Version: %s", m_RestServerVersion.c_str(), m_hostAndPort.c_str(), m_omfversion.c_str()); break; case ENDPOINT_ADH: Logger::getLogger()->warn("AVEVA Data Hub %s reconnected. OMF Version: %s", m_hostAndPort.c_str(), m_omfversion.c_str()); break; case ENDPOINT_OCS: Logger::getLogger()->warn("OSIsoft Cloud Services %s reconnected. OMF Version: %s", m_hostAndPort.c_str(), m_omfversion.c_str()); break; default: Logger::getLogger()->warn("Destination Data Archive %s reconnected. 
OMF Version: %s", m_hostAndPort.c_str(), m_omfversion.c_str()); break; } lastConnected = true; } } else { if (lastConnected == true) { switch (m_PIServerEndpoint) { case ENDPOINT_PIWEB_API: Logger::getLogger()->error("The PI Web API service %s is not available. HTTP Code: %d", m_hostAndPort.c_str(), httpCode); break; case ENDPOINT_EDS: Logger::getLogger()->error("Edge Data Store %s is not available. HTTP Code: %d", m_hostAndPort.c_str(), httpCode); break; case ENDPOINT_ADH: Logger::getLogger()->error("AVEVA Data Hub %s is not available. HTTP Code: %d", m_hostAndPort.c_str(), httpCode); break; case ENDPOINT_OCS: Logger::getLogger()->error("OSIsoft Cloud Services %s is not available. HTTP Code: %d", m_hostAndPort.c_str(), httpCode); break; default: Logger::getLogger()->warn("Destination Data Archive %s is not available. HTTP Code: %d", m_hostAndPort.c_str(), httpCode); break; } lastConnected = false; } } nextCheck = now + std::chrono::seconds(60); } return m_connected; } ================================================ FILE: C/plugins/north/OMF/plugin.cpp ================================================ /* * Fledge PI Server north plugin. 
* * Copyright (c) 2018-2022 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto, Stefano Simonelli * * PI Web API OMF Endpoint documentation available at: * https://fledge-iot.readthedocs.io/en/latest/OMF.html?highlight=omf%20hint# * * Troubleshooting the PI-Server integration available at: * https://fledge-iot.readthedocs.io/en/latest/troubleshooting_pi-server_integration.html#how-to-check-the-pi-web-api-is-installed-and-running * * Information about Asset Framework Hierarchy Rules available at: * https://fledge-iot.readthedocs.io/en/latest/OMF.html?highlight=omf%20hint#asset-framework-hierarchy-rules * * Information about OMF Hint available at: * https://fledge-iot.readthedocs.io/en/latest/OMF.html?highlight=omf%20hint#omf-hints * https://fledge-iot.readthedocs.io/en/latest/plugins/fledge-filter-omfhint/index.html * * OSIsoft documentation about PI Web API: * https://docs.osisoft.com/bundle/pi-web-api/page/pi-web-api.html * https://docs.osisoft.com/bundle/pi-web-api-reference/page/help.html * https://pisquare.osisoft.com/s/topic/0TO1I000000OGBGWA4/pi-web-api * * OSIsoft documentation about OMF: * https://docs.osisoft.com/bundle/omf/page/index.html * * OSIsoft documentation about OMF in PI Web API: * https://docs.osisoft.com/bundle/omf-with-pi-web-api/page/osisoft-message-format.html * */ #include <unistd.h> #include <plugin_api.h> #include <stdio.h> #include <stdlib.h> #include <strings.h> #include <string> #include <logger.h> #include <iostream> #include <omf.h> #include <piwebapi.h> #include <ocs.h> #include <simple_https.h> #include <simple_http.h> #include <config_category.h> #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include "json_utils.h" #include "libcurl_https.h" #include "utils.h" #include "string_utils.h" #include <version.h> #include <omfinfo.h> #include "crypto.hpp" #define VERBOSE_LOG 0 #define INSTRUMENT 0 using namespace std; using namespace rapidjson; using namespace SimpleWeb; /* * 
Note that the properties "group" is used to group related items, these will appear in different tabs,
 * using the group name, in the GUI.
 *
 * This GUI functionality has yet to be implemented.
 *
 * Current groups used are
 *	"Authentication"	Items relating to authentication with the endpoint
 *	"Connection"		Connection tuning items
 *	"Formats & Types"	Controls for the way formats and types are defined
 *	"Asset Framework"	Asset framework configuration items
 *	"Cloud"			Things related to OCS or ADH only
 *	"Advanced"		Adds to the Advanced tab that already exists
 */

// NOTE(review): "OMFDataActionCode" and "OMFRetrySleepTime" both declare
// "order": "11"; presumably one should be renumbered — confirm against the GUI
const char *PLUGIN_DEFAULT_CONFIG_INFO = QUOTE(
	{
		"plugin": {
			"description": "PI Server North C Plugin",
			"type": "string",
			"default": PLUGIN_NAME,
			"readonly": "true"
		},
		"PIServerEndpoint": {
			"description": "Select the endpoint among PI Web API, Connector Relay, OSIsoft Cloud Services or Edge Data Store",
			"type": "enumeration",
			"options":["PI Web API", "AVEVA Data Hub", "Connector Relay", "OSIsoft Cloud Services", "Edge Data Store"],
			"default": "PI Web API",
			"order": "1",
			"displayName": "Endpoint"
		},
		"ADHRegions": {
			"description": "AVEVA Data Hub or OSIsoft Cloud Services region",
			"type": "enumeration",
			"options":["US-West", "EU-West", "Australia"],
			"default": "US-West",
			"order": "2",
			"group" : "Cloud",
			"displayName": "Cloud Service Region",
			"validity" : "PIServerEndpoint == \"AVEVA Data Hub\" || PIServerEndpoint == \"OSIsoft Cloud Services\""
		},
		"SendFullStructure": {
			"description": "If true, create an AF structure to organize the data. If false, create PI Points only.",
			"type": "boolean",
			"default": "true",
			"order": "3",
			"displayName": "Create AF structure",
			"validity" : "PIServerEndpoint == \"PI Web API\""
		},
		"NamingScheme": {
			"description": "Define the naming scheme of the objects in the endpoint",
			"type": "enumeration",
			"options":["Concise", "Use Type Suffix", "Use Attribute Hash", "Backward compatibility"],
			"default": "Concise",
			"order": "4",
			"displayName": "Naming Scheme"
		},
		"ServerHostname": {
			"description": "Hostname of the server running the endpoint either PI Web API or Connector Relay",
			"type": "string",
			"default": "localhost",
			"order": "5",
			"displayName": "Server hostname",
			"validity" : "PIServerEndpoint != \"Edge Data Store\" && PIServerEndpoint != \"OSIsoft Cloud Services\" && PIServerEndpoint != \"AVEVA Data Hub\""
		},
		"ServerPort": {
			"description": "Port on which the endpoint either PI Web API or Connector Relay or Edge Data Store is listening, 0 will use the default one",
			"type": "integer",
			"default": "0",
			"order": "6",
			"displayName": "Server port, 0=use the default",
			"validity" : "PIServerEndpoint != \"OSIsoft Cloud Services\" && PIServerEndpoint != \"AVEVA Data Hub\""
		},
		"producerToken": {
			"description": "The producer token that represents this Fledge stream",
			"type": "string",
			"default": "omf_north_0001",
			"order": "7",
			"displayName": "Producer Token",
			"group" : "Authentication",
			"validity" : "PIServerEndpoint == \"Connector Relay\""
		},
		"source": {
			"description": "Defines the source of the data to be sent on the stream, this may be one of either readings, statistics or audit.",
			"type": "enumeration",
			"options":["readings", "statistics"],
			"default": "readings",
			"order": "8",
			"displayName": "Data Source"
		},
		"StaticData": {
			"description": "Static data to include in every Container created by OMF",
			"type": "string",
			"default": "Location: Palo Alto, Company: Dianomic",
			"order": "9",
			"displayName": "Static Data"
		},
		"AssetDatapointNameDelimiter": {
			"description": "Delimiter character between Asset and Datapoint in PI data stream names",
			"type": "string",
			"default": ".",
			"order": "10",
			"displayName": "Data Stream Name Delimiter"
		},
		"OMFDataActionCode": {
			"description": "OMF Action Code to use when POSTing OMF Data messages",
			"type": "enumeration",
			"options":["update", "create"],
			"default": "update",
			"order": "11",
			"displayName": "Action Code for Data Messages"
		},
		"OMFRetrySleepTime": {
			"description": "Seconds between each retry for the communication with the OMF PI Connector Relay, NOTE : the time is doubled at each attempt.",
			"type": "integer",
			"default": "1",
			"order": "11",
			"group": "Connection",
			"displayName": "Sleep Time Retry"
		},
		"OMFMaxRetry": {
			"description": "Max number of retries for the communication with the OMF PI Connector Relay",
			"type": "integer",
			"default": "3",
			"order": "12",
			"group": "Connection",
			"displayName": "Maximum Retry"
		},
		"OMFHttpTimeout": {
			"description": "Timeout in seconds for the HTTP operations with the OMF PI Connector Relay",
			"type": "integer",
			"default": "10",
			"order": "13",
			"group": "Connection",
			"displayName": "HTTP Timeout"
		},
		"formatInteger": {
			"description": "OMF format property to apply to the type Integer",
			"type": "enumeration",
			"default": "int64",
			"options": ["int64", "int32", "int16", "uint64", "uint32", "uint16"],
			"order": "14",
			"group": "Formats & Types",
			"displayName": "Integer Format"
		},
		"formatNumber": {
			"description": "OMF format property to apply to the type Number",
			"type": "enumeration",
			"default": "float64",
			"options": ["float64", "float32"],
			"order": "15",
			"group": "Formats & Types",
			"displayName": "Number Format"
		},
		"compression": {
			"description": "Compress readings data before sending to PI server",
			"type": "boolean",
			"default": "true",
			"order": "16",
			"group": "Connection",
			"displayName": "Compression"
		},
		"DefaultAFLocation": {
			"description": "Defines the default location in the Asset Framework hierarchy in which the assets will be created, each level is separated by /, PI Web API only.",
			"type": "string",
			"default": "/fledge/data_piwebapi/default",
			"order": "17",
			"displayName": "Default Asset Framework Location",
			"group" : "Asset Framework",
			"validity" : "PIServerEndpoint == \"PI Web API\""
		},
		"AFMap": {
			"description": "Defines a set of rules to address where assets should be placed in the AF hierarchy.",
			"type": "JSON",
			"default": AF_HIERARCHY_RULES,
			"order": "18",
			"group" : "Asset Framework",
			"displayName": "Asset Framework hierarchy rules",
			"validity" : "PIServerEndpoint == \"PI Web API\""
		},
		"notBlockingErrors": {
			"description": "These errors are considered not blocking in the communication with the PI Server, the sending operation will proceed with the next block of data if one of these is encountered",
			"type": "JSON",
			"default": NOT_BLOCKING_ERRORS_DEFAULT,
			"order": "19" ,
			"readonly": "true"
		},
		"PIWebAPIAuthenticationMethod": {
			"description": "Defines the authentication method to be used with the PI Web API.",
			"type": "enumeration",
			"options":["anonymous", "basic", "kerberos"],
			"default": "anonymous",
			"order": "20",
			"group": "Authentication",
			"displayName": "PI Web API Authentication Method",
			"validity" : "PIServerEndpoint == \"PI Web API\""
		},
		"PIWebAPIUserId": {
			"description": "User id of PI Web API to be used with the basic access authentication.",
			"type": "string",
			"default": "user_id",
			"order": "21",
			"group": "Authentication",
			"displayName": "PI Web API User Id",
			"validity" : "PIServerEndpoint == \"PI Web API\" && PIWebAPIAuthenticationMethod == \"basic\""
		},
		"PIWebAPIPassword": {
			"description": "Password of the user of PI Web API to be used with the basic access authentication.",
			"type": "password",
			"default": "password",
			"order": "22" ,
			"group": "Authentication",
			"displayName": "PI Web API Password",
			"validity" : "PIServerEndpoint == \"PI Web API\" && PIWebAPIAuthenticationMethod == \"basic\""
		},
		"PIWebAPIKerberosKeytabFileName": {
			"description": "Keytab file name used for Kerberos authentication in PI Web API.",
			"type": "string",
			"default": "piwebapi_kerberos_https.keytab",
			"order": "23" ,
			"group": "Authentication",
			"displayName": "PI Web API Kerberos keytab file",
			"validity" : "PIServerEndpoint == \"PI Web API\" && PIWebAPIAuthenticationMethod == \"kerberos\""
		},
		"OCSNamespace" : {
			"description" : "Specifies the namespace where the information are stored and it is used for the interaction with AVEVA Data Hub or OCS",
			"type" : "string",
			"default": "name_space",
			"order": "24",
			"group" : "Cloud",
			"displayName" : "Namespace",
			"validity" : "PIServerEndpoint == \"OSIsoft Cloud Services\" || PIServerEndpoint == \"AVEVA Data Hub\""
		},
		"OCSTenantId" : {
			"description" : "Tenant id associated to the specific AVEVA Data Hub or OCS account",
			"type" : "string",
			"default": "ocs_tenant_id",
			"order": "25",
			"group" : "Cloud",
			"displayName" : "Tenant ID",
			"validity" : "PIServerEndpoint == \"OSIsoft Cloud Services\" || PIServerEndpoint == \"AVEVA Data Hub\""
		},
		"OCSClientId" : {
			"description" : "Client id associated to the specific account, it is used to authenticate when using the AVEVA Data Hub or OCS",
			"type" : "string",
			"default": "ocs_client_id",
			"order": "26",
			"group" : "Cloud",
			"displayName" : "Client ID",
			"validity" : "PIServerEndpoint == \"OSIsoft Cloud Services\" || PIServerEndpoint == \"AVEVA Data Hub\""
		},
		"OCSClientSecret" : {
			"description" : "Client secret associated to the specific account, it is used to authenticate with AVEVA Data Hub or OCS",
			"type" : "password",
			"default": "ocs_client_secret",
			"order": "27",
			"group" : "Cloud",
			"displayName" : "Client Secret",
			"validity" : "PIServerEndpoint == \"OSIsoft Cloud Services\" || PIServerEndpoint == \"AVEVA Data Hub\""
		},
		"PIWebAPInotBlockingErrors": {
			"description": "These errors are considered not blocking in the communication with the PI Web API, the sending operation will proceed with the next block of data if one of these is encountered",
			"type": "JSON",
			"default": NOT_BLOCKING_ERRORS_DEFAULT_PI_WEB_API,
			"order": "28" ,
			"readonly": "true"
		},
		"Legacy": {
			"description": "Force all data to be sent using complex OMF types",
			"type": "boolean",
			"default": "false",
			"order": "29",
			"group": "Formats & Types",
			"displayName": "Complex Types"
		},
		"EnableTracing" : {
			"description" : "If enabled, a detailed tracing of OMF messages will be written to logs/debug-trace/omf.log file in Fledge data directory.",
			"type" : "boolean",
			"default" : "false",
			"order" : "30",
			"displayName" : "Enable Tracing"
		}
	}
);

// "default": "{\"pipeline\": [\"DeltaFilter\"]}"

/**
 * Return the information about this plugin
 */
/**
 * The PI Server plugin interface
 */
extern "C" {

/**
 * The C API plugin information structure
 */
static PLUGIN_INFORMATION info = {
	PLUGIN_NAME,			  // Name
	VERSION,			  // Version
	SP_PERSIST_DATA | SP_BUILTIN,	  // Flags
	PLUGIN_TYPE_NORTH,		  // Type
	"1.0.0",			  // Interface version
	PLUGIN_DEFAULT_CONFIG_INFO	  // Configuration
};

/**
 * Return the information about this plugin
 */
PLUGIN_INFORMATION *plugin_info()
{
	return &info;
}

/**
 * Initialise the plugin with configuration.
 *
 * This function is called to get the plugin handle.
*/ PLUGIN_HANDLE plugin_init(ConfigCategory* configData) { #if INSTRUMENT struct timeval startTime; gettimeofday(&startTime, NULL); #endif int endpointPort = 0; /** * Handle the PI Server parameters here */ // Allocate connector struct OMFInformation *info = new OMFInformation(configData); #if INSTRUMENT Logger::getLogger()->debug("plugin_init elapsed time: %6.3f seconds", GetElapsedTime(&startTime)); #endif return (PLUGIN_HANDLE)info; } /** * Plugin start with stored plugin_data * * @param handle The plugin handle * @param storedData The stored plugin_data */ void plugin_start(const PLUGIN_HANDLE handle, const string& storedData) { #if INSTRUMENT struct timeval startTime; gettimeofday(&startTime, NULL); // For debugging: write plugin's stored data to a file string jsonFilePath = getDataDir() + string("/logs/OMFStoredData.json"); ofstream f(jsonFilePath.c_str(), ios_base::trunc); f << storedData.c_str(); f.close(); #endif Logger* logger = Logger::getLogger(); OMFInformation *info = (OMFInformation *)handle; info->start(storedData); #if INSTRUMENT Logger::getLogger()->debug("plugin_start elapsed time: %6.3f seconds", GetElapsedTime(&startTime)); #endif } /** * Send Readings data to historian server */ uint32_t plugin_send(const PLUGIN_HANDLE handle, const vector<Reading *>& readings) { OMFInformation *info = (OMFInformation *)handle; return info->send(readings); } /** * Shutdown the plugin * * Delete allocated data * * Note: the entry with FAKE_ASSET_KEY ios never saved. 
* * @param handle The plugin handle * @return A string with JSON plugin data * the caller will persist */ string plugin_shutdown(PLUGIN_HANDLE handle) { // Delete the handle OMFInformation *info = (OMFInformation *) handle; string rval = info->saveData(); delete info; return rval; } // End of extern "C" }; ================================================ FILE: C/plugins/storage/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (FledgeStoragePlugins) set(CMAKE_CXX_FLAGS "-std=c++11 -O3") add_subdirectory(common) add_subdirectory(postgres) add_subdirectory(sqlite) ================================================ FILE: C/plugins/storage/README.rst ================================================ .. |br| raw:: html <br /> *************** Storage Plugins *************** This directory contains the source code for the plugins used by the Storage service. Building ======== To make this plugin, run the commands: :: mkdir build cd build cmake .. make Use the command ``make install`` to install in the default location, note you will need permission on the installation directory or use the sudo command. Pass the option *DESTDIR=* to set your own destination into which to install the Storage service. 
================================================ FILE: C/plugins/storage/common/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.4.0) project(storage-common-lib) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(DLLIB -ldl) # Find source files file(GLOB SOURCES *.cpp) # Include header files include_directories(./include) include_directories(../../../common/include) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) target_link_libraries(${PROJECT_NAME} ${DLLIB}) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib) ================================================ FILE: C/plugins/storage/common/disk_monitor.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <disk_monitor.h> #include <logger.h> #include <sys/vfs.h> #include <string.h> using namespace std; /** * Construct a disk space monitor class * * It monitors the free space on two path, since the storage service may use * different locations for reading and configuration storage. If they are both * the same file system then the monitor is only done once. * * If the free space falls below 5% a fatal error is written to the error log * If it falls below 10% a warning is written advising that disk space should be released * It attempts to predict when storage will become exhausted. If it is less then 14 days * it will report this to the error log once a day. * If it is less than 72 hours it will report this once per hour. 
* * All reporting is to the system log * * NB In an ideal world we would make the thresholds and reporting interval configurable, * however we are running within the limited environment of a storage plugin and do not * have access to the manaagement client or configuration subsystem. */ DiskSpaceMonitor::DiskSpaceMonitor(const string& path1, const string& path2) : m_dbPath1(path1), m_dbPath2(path2), m_started(false), m_sameDevice(false), m_lastCheck(0), m_lastPerc1(0.0), m_lastPerc2(0.0), m_lastPrediction1(0.0), m_lastPrediction2(0.0), m_reported(0) { m_logger = Logger::getLogger(); } /** * Called periodically to monitor the disk usage * * @param interval The number of seconds between calls */ void DiskSpaceMonitor::periodic(int interval) { struct statfs stf1, stf2; if (!m_started) { // We have not yet started to monitor the disk usage. // Do the initial statfs calls to see if the configuration // and readings are on the same filesystem. If they are we // only monitor one of them // // If the statfs fails log it and do not start monitoring. The // rate at which logs are created is limited to prevent flooding // the error log. if (statfs(m_dbPath1.c_str(), &stf1) != 0) { if (m_reported == 0) { m_logger->error("Can't statfs %s, %s. Disk space monitoring is disabled", m_dbPath1.c_str(), strerror(errno)); m_reported++; } else if (++m_reported > FAILED_DISK_MONITOR_REPORT_INTERVAL) { m_reported = 0; } return; } if (statfs(m_dbPath2.c_str(), &stf2) != 0) { if (m_reported == 0) { m_logger->error("Can't statfs %s, %s. 
Disk space monitoring is disabled", m_dbPath2.c_str(), strerror(errno)); m_reported++; } else if (++m_reported > FAILED_DISK_MONITOR_REPORT_INTERVAL) { m_reported = 0; } return; } if (memcmp(&stf1.f_fsid, &stf2.f_fsid, sizeof(fsid_t)) == 0) // Same filesystem { m_sameDevice = true; } m_started = true; } m_lastCheck += interval; if (m_lastCheck < CHECK_THRESHOLD) { // Do not check too frerquently return; } m_lastCheck = 0; if (statfs(m_dbPath1.c_str(), &stf1) == 0) { unsigned long freespace = (unsigned long)stf1.f_bavail; unsigned long totalspace = (unsigned long)stf1.f_blocks; double perc = (double)(freespace * 100.0) / totalspace; if (perc < 5.0) { m_logger->fatal("Disk space is critically low. Urgent action required, continuing may result in data corruption"); } else if (perc < 10.0) { m_logger->error("Available disk space is becoming low, please consider releasing more disk space"); } if (m_lastPerc1 > 0.0) { double diff = m_lastPerc1 - perc; if (diff > 0.0) { double prediction = (perc * CHECK_THRESHOLD)/ (3600.0 * diff); if (prediction <= 72.0 && m_lastPrediction1 - prediction > 1.0) { m_logger->error("At current rates disk space will be exhausted in %.0f hours", prediction); m_lastPrediction1 = prediction; } else if (prediction / 24.0 <= 14 && (m_lastPrediction1 == 0.0 || m_lastPrediction1 - prediction > 24.0)) { m_lastPrediction1 = prediction; m_logger->warn("At current rates disk space will be exhausted in %.1f days", prediction / 24); } } else { m_lastPrediction1 = 0.0; } } m_lastPerc1 = perc; } if (m_sameDevice) { return; } if (statfs(m_dbPath2.c_str(), &stf1) == 0) { unsigned long freespace = (unsigned long)stf1.f_bavail; unsigned long totalspace = (unsigned long)stf1.f_blocks; double perc = (double)(freespace * 100.0) / totalspace; if (perc < 5.0) { m_logger->fatal("Disk space is critically low. 
Urgent action required, continuing may result in data corruption"); } else if (perc < 10.0) { m_logger->error("Available disk space is becoming low, please consider releasing more disk space"); } if (m_lastPerc2 > 0.0) { double diff = m_lastPerc2 - perc; if (diff > 0.0) { double prediction = (perc * CHECK_THRESHOLD)/ (3600.0 * diff); if (prediction <= 72.0 && (m_lastPrediction2 == 0.0 || m_lastPrediction2 - prediction > 1.0)) { m_logger->error("At current rates disk space will be exhausted in %.0f hours", prediction); m_lastPrediction2 = prediction; } else if (prediction / 24.0 <= 14 && m_lastPrediction1 - prediction > 24.0) { m_lastPrediction2 = prediction; m_logger->warn("At current rates disk space will be exhausted in %.1f days", prediction / 24); } } else { m_lastPrediction2 = 0.0; } } m_lastPerc2 = perc; } } ================================================ FILE: C/plugins/storage/common/include/disk_monitor.h ================================================ #ifndef _DISK_SPACE_MONITOR_H #define _DISK_SPACE_MONITOR_H /* * Fledge storage service. 
 *
 * Copyright (c) 2024 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <logger.h>

#define CHECK_THRESHOLD 300				// check every 5 minutes
#define FAILED_DISK_MONITOR_REPORT_INTERVAL 600		// Interval between logging failure to stat the filesystem (10 minutes)

/**
 * A class to monitor the free disk space used to store
 * the various storage databases
 */
class DiskSpaceMonitor {
	public:
		DiskSpaceMonitor(const std::string& db1, const std::string& db2);
		void		periodic(int interval);
	private:
		std::string	m_dbPath1;		// First database path to monitor
		std::string	m_dbPath2;		// Second database path to monitor
		bool		m_started;		// Set once the initial statfs calls have succeeded
		bool		m_sameDevice;		// True when both paths reside on the same filesystem
		unsigned int	m_lastCheck;		// Seconds accumulated since the last disk check
		Logger		*m_logger;		// System logger used for all reporting
		double		m_lastPerc1;		// Free space percentage of path 1 at the previous check
		double		m_lastPerc2;		// Free space percentage of path 2 at the previous check
		double		m_lastPrediction1;	// Last reported hours-to-exhaustion prediction for path 1
		double		m_lastPrediction2;	// Last reported hours-to-exhaustion prediction for path 2
		int		m_reported;		// Counter used to rate limit statfs failure logging
};
#endif

================================================
FILE: C/plugins/storage/common/include/sql_buffer.h
================================================
#ifndef _SQL_BUFFER_H
#define _SQL_BUFFER_H
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <string>
#include <list>

#define BUFFER_CHUNK 1024	// Default allocation unit for each buffer in the chain

/**
 * Buffer class designed to hold SQL statement that can grow
 * as required but have minimal copy semantics.
 */
class SQLBuffer {
	// A single fixed-size chunk in the chain of buffers
	class Buffer {
		public:
			Buffer();
			Buffer(unsigned int);
			~Buffer();
			char		*detach();
			char		*data;		// The character data, always NUL terminated
			unsigned int	offset;		// Number of bytes currently used
			unsigned int	length;		// Usable capacity (one extra byte is allocated for the NUL)
			bool		attached;	// False once ownership of data has passed out via detach()
	};
	public:
		SQLBuffer();
		~SQLBuffer();
		// True when nothing has been appended to the buffer yet
		bool		isEmpty() { return buffers.empty() || (buffers.size() == 1 && buffers.front()->offset == 0); }
		void		append(const char);
		void		append(const char *);
		void		append(const int);
		void		append(const unsigned int);
		void		append(const long);
		void		append(const unsigned long);
		void		append(const double);
		void		append(const std::string&);
		void		quote(const std::string&);
		const char	*coalesce();
		void		clear();
	private:
		std::list<Buffer *>	buffers;	// Chain of chunks that make up the statement
};
#endif

================================================
FILE: C/plugins/storage/common/sql_buffer.cpp
================================================
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <sql_buffer.h>
#include <string.h>
#include <string_utils.h>

using namespace std;

/**
 * Buffer class designed to hold SQL statement that can grow
 * as required but have minimal copy semantics.
*/ /** * SQLBuffer constructor */ SQLBuffer::SQLBuffer() { buffers.push_front(new SQLBuffer::Buffer()); } /** * SQLBuffer destructor */ SQLBuffer::~SQLBuffer() { for (list<SQLBuffer::Buffer *>::iterator it = buffers.begin(); it != buffers.end(); ++it) { delete *it; } } /** * Clear all the buffers from the SQLBuffer and allow it to be reused */ void SQLBuffer::clear() { for (list<SQLBuffer::Buffer *>::iterator it = buffers.begin(); it != buffers.end(); ++it) { delete *it; } buffers.clear(); buffers.push_front(new SQLBuffer::Buffer()); } /** * Append a character to a buffer * * @param data The character to append to the buffer */ void SQLBuffer::append(const char data) { SQLBuffer::Buffer *buffer = buffers.back(); if (buffer->offset == buffer->length) { buffer = new SQLBuffer::Buffer(); buffers.push_back(buffer); } buffer->data[buffer->offset] = data; buffer->data[buffer->offset + 1] = 0; buffer->offset++; } /** * Append a character string to a buffer * * @para data The string to append to the buffer */ void SQLBuffer::append(const char *data) { unsigned int len = strlen(data); SQLBuffer::Buffer *buffer = buffers.back(); if (buffer->offset + len >= buffer->length) { if (len > BUFFER_CHUNK) { buffer = new SQLBuffer::Buffer(len + BUFFER_CHUNK); } else { buffer = new SQLBuffer::Buffer(); } buffers.push_back(buffer); } memcpy(&buffer->data[buffer->offset], data, len); buffer->offset += len; buffer->data[buffer->offset] = 0; } /** * Append an integer to a buffer * * @param value The value to append to the buffer */ void SQLBuffer::append(const int value) { char tmpbuf[80]; unsigned int len; SQLBuffer::Buffer *buffer = buffers.back(); len = (unsigned int)snprintf(tmpbuf, 80, "%d", value); if (buffer->offset + len >= buffer->length) { buffer = new SQLBuffer::Buffer(); buffers.push_back(buffer); } memcpy(&buffer->data[buffer->offset], tmpbuf, len); buffer->offset += len; buffer->data[buffer->offset] = 0; } /** * Append a long to a buffer * * @param value The long value to 
append to the buffer */ void SQLBuffer::append(const long value) { char tmpbuf[80]; unsigned int len; SQLBuffer::Buffer *buffer = buffers.back(); len = (unsigned int)snprintf(tmpbuf, 80, "%ld", value); if (buffer->offset + len >= buffer->length) { buffer = new SQLBuffer::Buffer(); buffers.push_back(buffer); } memcpy(&buffer->data[buffer->offset], tmpbuf, len); buffer->offset += len; buffer->data[buffer->offset] = 0; } /** * Append an unsigned integer to a buffer * * @param value The unsigned long value to append to the buffer */ void SQLBuffer::append(const unsigned int value) { char tmpbuf[80]; unsigned int len; SQLBuffer::Buffer *buffer = buffers.back(); len = (unsigned int)snprintf(tmpbuf, 80, "%u", value); if (buffer->offset + len >= buffer->length) { buffer = new SQLBuffer::Buffer(); buffers.push_back(buffer); } memcpy(&buffer->data[buffer->offset], tmpbuf, len); buffer->offset += len; buffer->data[buffer->offset] = 0; } /** * Append an unsigned long to a buffer * * @param value The value to append to the buffer */ void SQLBuffer::append(const unsigned long value) { char tmpbuf[80]; unsigned int len; SQLBuffer::Buffer *buffer = buffers.back(); len = (unsigned int)snprintf(tmpbuf, 80, "%lu", value); if (buffer->offset + len >= buffer->length) { buffer = new SQLBuffer::Buffer(); buffers.push_back(buffer); } memcpy(&buffer->data[buffer->offset], tmpbuf, len); buffer->offset += len; buffer->data[buffer->offset] = 0; } /** * Append a double to a buffer * * @param value The double value to append to the buffer */ void SQLBuffer::append(const double value) { char tmpbuf[80]; unsigned int len; SQLBuffer::Buffer *buffer = buffers.back(); len = (unsigned int)snprintf(tmpbuf, 80, "%f", value); if (buffer->offset + len >= buffer->length) { buffer = new SQLBuffer::Buffer(); buffers.push_back(buffer); } memcpy(&buffer->data[buffer->offset], tmpbuf, len); buffer->offset += len; buffer->data[buffer->offset] = 0; } /** * Append a string to a buffer * * @param str The string to 
be appended to the buffer */ void SQLBuffer::append(const string& str) { const char *cstr = str.c_str(); unsigned int len = strlen(cstr); SQLBuffer::Buffer *buffer = buffers.back(); if (buffer->offset + len >= buffer->length) { if (len > BUFFER_CHUNK) { buffer = new SQLBuffer::Buffer(len + BUFFER_CHUNK); } else { buffer = new SQLBuffer::Buffer(); } buffers.push_back(buffer); } memcpy(&buffer->data[buffer->offset], cstr, len); buffer->offset += len; buffer->data[buffer->offset] = 0; } /** * Quote and append a string to a buffer * * @param str The string to quote and append to the buffer */ void SQLBuffer::quote(const string& str) { string esc = str; StringEscapeQuotes(esc); const char *cstr = esc.c_str(); unsigned int len = strlen(cstr) + 2; SQLBuffer::Buffer *buffer = buffers.back(); if (buffer->offset + len >= buffer->length) { if (len > BUFFER_CHUNK) { buffer = new SQLBuffer::Buffer(len + BUFFER_CHUNK); } else { buffer = new SQLBuffer::Buffer(); } buffers.push_back(buffer); } buffer->data[buffer->offset] = '"'; memcpy(&buffer->data[buffer->offset + 1], cstr, len - 2); buffer->data[buffer->offset + len - 1] = '"'; buffer->offset += len; buffer->data[buffer->offset] = 0; } /** * Create a coalesced buffer from the buffer chain * * The buffer returned has been created usign the new[] operator and must be * deleted by the caller. * @return char* The SQL statement in a single buffer */ const char *SQLBuffer::coalesce() { unsigned int length = 0, offset = 0; char *buffer = 0; if (buffers.size() == 1) { return buffers.back()->detach(); } for (list<SQLBuffer::Buffer *>::iterator it = buffers.begin(); it != buffers.end(); ++it) { length += (*it)->offset; } buffer = new char[length+1]; for (list<SQLBuffer::Buffer *>::iterator it = buffers.begin(); it != buffers.end(); ++it) { memcpy(&buffer[offset], (*it)->data, (*it)->offset); offset += (*it)->offset; } buffer[offset] = 0; return buffer; } /** * Construct a buffer with a standard size initial buffer. 
*/ SQLBuffer::Buffer::Buffer() : offset(0), length(BUFFER_CHUNK), attached(true) { data = new char[BUFFER_CHUNK+1]; data[0] = 0; } /** * Construct a large buffer, passign the size of buffer required. THis is useful * if you know your buffer requirements are large and you wish to reduce the amount * of allocation required. * * @param size The size of the initial buffer to allocate. */ SQLBuffer::Buffer::Buffer(unsigned int size) : offset(0), length(size), attached(true) { data = new char[size+1]; data[0] = 0; } /** * Buffer destructor, the buffer itself is also deleted by this * call and any reference to it must no longer be used. */ SQLBuffer::Buffer::~Buffer() { if (attached) { delete[] data; data = 0; } } /** * Detach the buffer from the SQLBuffer. The reference to the buffer * is removed from the SQLBuffer but the buffer itself is not deleted. * This allows the buffer ownership to be taken by external code * whilst allowing the SQLBuffer to allocate a new buffer. */ char *SQLBuffer::Buffer::detach() { char *rval = data; attached = false; length = 0; data = 0; return rval; } ================================================ FILE: C/plugins/storage/postgres/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(postgres) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(STORAGE_COMMON_LIB storage-common-lib) # Handle Postgres on RedHat/CentOS set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}") include(CheckRhPg) # Find source files file(GLOB SOURCES *.cpp) # Include header files include_directories(include ../../../common/include ../../../services/common/include ../common/include) include_directories(../../../thirdparty/rapidjson/include) link_directories(${PROJECT_BINARY_DIR}/../../../lib) if(${RH_POSTGRES_FOUND} EQUAL 1) include_directories(${RH_POSTGRES_INCLUDE}) link_directories(${RH_POSTGRES_LIB64}) else() 
include_directories(/usr/include/postgresql) endif() # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) target_link_libraries(${PROJECT_NAME} -lpq) target_link_libraries(${PROJECT_NAME} ${STORAGE_COMMON_LIB}) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/plugins/storage//${PROJECT_NAME}) # Install init.sql install(FILES ${CMAKE_SOURCE_DIR}/scripts/plugins/storage/${PROJECT_NAME}/init.sql DESTINATION fledge/plugins/storage//${PROJECT_NAME}) ================================================ FILE: C/plugins/storage/postgres/CheckRhPg.cmake ================================================ # Evaluates if rh-postgresql13 is available and enabled and identifies its path execute_process( COMMAND "scl" "enable" "rh-postgresql13" "command -v pg_isready" RESULT_VARIABLE CMD_ERROR OUTPUT_VARIABLE CMD_OUTPUT ) if(${CMD_ERROR} EQUAL 0) string(REGEX REPLACE "/bin/pg_isready[\n]" "" RH_POSTGRES_PATH ${CMD_OUTPUT}) set(RH_POSTGRES_FOUND 1) set(RH_POSTGRES_INCLUDE "${RH_POSTGRES_PATH}/include") set(RH_POSTGRES_LIB64 "${RH_POSTGRES_PATH}/lib64") else() set(RH_POSTGRES_FOUND 0) endif() if(${RH_POSTGRES_FOUND} EQUAL 1) MESSAGE( STATUS "INFO: rh-postgresql13 found in the path :${RH_POSTGRES_PATH}:") else() MESSAGE( STATUS "INFO: rh-postgresql13 not found") endif() ================================================ FILE: C/plugins/storage/postgres/README.rst ================================================ ************************* PostgreSQL Storage Plugin ************************* This directory contains the source code for the PostgreSQL Storage plugin used by the Storage service. Building ======== To make postgres plugin run the commands: :: mkdir build cd build cmake .. make Use the command ``make install`` to install in the default location, note you will need permission on the installation directory or use the sudo command. 
Pass the option *DESTDIR=* to set your own destination into which to install the Storage service. ================================================ FILE: C/plugins/storage/postgres/connection.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <connection.h> #include <connection_manager.h> #include <sql_buffer.h> #include <iostream> #include <libpq-fe.h> #include "rapidjson/document.h" #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include "rapidjson/error/error.h" #include "rapidjson/error/en.h" #include <string> #include <vector> #include <stdarg.h> #include <stdlib.h> #include <sstream> #include <logger.h> #include <time.h> #include <algorithm> #include <math.h> #include <sys/time.h> #include "json_utils.h" #include "string_utils.h" #include <iostream> #include <chrono> #include <thread> using namespace std; using namespace rapidjson; static time_t connectErrorTime = 0; #define CONNECT_ERROR_THRESHOLD 5*60 // 5 minutes #define MSG_LEN 5000 // // Used for the purge operation - start // #define PURGE_DELETE_BLOCK_SIZE 10000 #define MIN_PURGE_DELETE_BLOCK_SIZE 1000 #define MAX_PURGE_DELETE_BLOCK_SIZE 10000 #define TARGET_PURGE_BLOCK_DEL_TIME (70*1000) // 70 msec #define PURGE_BLOCK_SZ_GRANULARITY 5 // 5 rows #define RECALC_PURGE_BLOCK_SIZE_NUM_BLOCKS 30 // recalculate purge block size after every 30 blocks #define START_TIME std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); #define END_TIME std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); \ auto usecs = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); // // Used for the purge operation - end #define LEN_BUFFER_DATE 100 // Format timestamp having microseconds #define F_DATEH24_US "YYYY-MM-DD HH24:MI:SS.US" static int purgeBlockSize = 
PURGE_DELETE_BLOCK_SIZE; const vector<string> pg_column_reserved_words = { "user" }; /** * Check whether to compute timebucket query with min,max,avg for all datapoints * * @param payload JSON payload * @return True if aggregation is 'all' */ bool aggregateAll(const Value& payload) { if (payload.HasMember("aggregate") && payload["aggregate"].IsObject()) { const Value& agg = payload["aggregate"]; if (agg.HasMember("operation") && strcmp(agg["operation"].GetString(), "all") == 0) { return true; } } return false; } /** * Build, exucute and return data of a timebucket query with min,max,avg for all datapoints * * @param payload JSON object for timebucket query * @param resultSet JSON Output buffer * @return True of success, false on any error */ bool Connection::aggregateQuery(const Value& payload, string& resultSet) { if (!payload.HasMember("where") || !payload.HasMember("timebucket")) { raiseError("retrieve", "aggregateQuery is missing " "'where' and/or 'timebucket' properties"); return false; } SQLBuffer sql; sql.append("SELECT asset_code, "); double size = 1; string timeColumn; // Check timebucket object if (payload.HasMember("timebucket")) { const Value& bucket = payload["timebucket"]; if (!bucket.HasMember("timestamp")) { raiseError("retrieve", "aggregateQuery is missing " "'timestamp' property for 'timebucket'"); return false; } // Time column timeColumn = bucket["timestamp"].GetString(); // Bucket size if (bucket.HasMember("size")) { size = atof(bucket["size"].GetString()); if (!size) { size = 1; } } // Time format for output if (bucket.HasMember("format") && size >= 1) { sql.append("to_char("); sql.append("\""); sql.append("timestamp"); sql.append("\""); sql.append(", '"); sql.append(bucket["format"].GetString()); sql.append("')"); } else { if (size < 1) { // sub-second granularity to time bucket size: // force output formatting with microseconds sql.append("to_char("); sql.append("\""); sql.append("timestamp"); sql.append("\""); sql.append(", '"); 
sql.append("YYYY-MM-DD HH24:MI:SS.US"); sql.append("')"); } else { sql.append("timestamp"); } } // Time output alias if (bucket.HasMember("alias")) { sql.append(" AS "); sql.append(bucket["alias"].GetString()); } } // JSON format aggregated data sql.append(", (('{' || string_agg('\"' || x || '\" : ' || resd, ', ') || '}')::jsonb) AS reading "); // subquery sql.append("FROM ( SELECT x, asset_code, max(timestamp) AS timestamp, "); // Add mon sql.append("'{\"min\" : ' || min((reading->>x)::float) || ', "); // Add max sql.append("\"max\" : ' || max((reading->>x)::float) || ', "); // Add avg sql.append("\"average\" : ' || avg((reading->>x)::float) || ', "); // Add count sql.append("\"count\" : ' || count(reading->>x) || ', "); // Add sum sql.append("\"sum\" : ' || sum((reading->>x)::float) || '}' AS resd "); // subquery sql.append("FROM ( SELECT asset_code, "); sql.append(timeColumn); sql.append(", to_timestamp("); // Size formatted string string size_format; if (fmod(size, 1.0) == 0.0) { size_format = to_string(int(size)); } else { size_format = to_string(size); } // Add timebucket size if (size != 1) { sql.append(size_format); if (size > 1) { sql.append(" * round(extract(epoch from "); } else { sql.append(" * round((extract(epoch from "); } sql.append(timeColumn); sql.append(" ) / "); sql.append(size_format); sql.append(')'); if (size > 1) { sql.append(')'); } else { sql.append("::numeric, 6))"); } } else { sql.append(" round(extract(epoch from "); sql.append(timeColumn); sql.append(") / 1)) "); } sql.append(" AS \"timestamp\", reading, "); // Get all datapoints in 'reading' field sql.append("jsonb_object_keys(reading) AS x FROM fledge.readings "); // Add where condition sql.append("WHERE "); vector<string> asset_codes; if (!jsonWhereClause(payload["where"], sql, asset_codes)) { raiseError("retrieve", "aggregateQuery: failure while building WHERE clause"); return false; } // sort results sql.append(" ORDER BY "); sql.append(timeColumn); sql.append(" DESC) tmp "); // 
Add group by sql.append("GROUP BY x, asset_code, "); sql.append("round(extract(epoch from "); sql.append(timeColumn); sql.append(") / "); if (size != 1) { sql.append(size_format); } else { sql.append('1'); } sql.append(") "); // sort results sql.append("ORDER BY timestamp DESC) tbl "); // Add final group and sort sql.append("GROUP BY timestamp, asset_code ORDER BY timestamp DESC"); // Add limit if (payload.HasMember("limit")) { if (!payload["limit"].IsInt()) { raiseError("retrieve", "aggregateQuery: limit must be specfied as an integer"); return false; } sql.append(" LIMIT "); try { sql.append(payload["limit"].GetInt()); } catch (exception e) { raiseError("retrieve", "aggregateQuery: bad value for limit parameter: %s", e.what()); return false; } } sql.append(';'); // Execute query const char *query = sql.coalesce(); logSQL("CommonRetrieve", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_TUPLES_OK) { mapResultSet(res, resultSet); PQclear(res); return true; } char *SQLState = PQresultErrorField(res, PG_DIAG_SQLSTATE); if (!strcmp(SQLState, "22P02")) // Conversion error { raiseError("retrieve", "Unable to convert data to the required type"); } else { raiseError("retrieve", PQerrorMessage(dbConnection)); } PQclear(res); return false; } /** * Create a database connection */ Connection::Connection() : m_maxReadingRows(INSERT_ROW_LIMIT) { const char *defaultConninfo = "dbname = fledge"; char *connInfo = NULL; if ((connInfo = getenv("DB_CONNECTION")) == NULL) { connInfo = (char *)defaultConninfo; } /* Make a connection to the database */ dbConnection = PQconnectdb(connInfo); /* Check to see that the backend connection was successfully made */ if (PQstatus(dbConnection) != CONNECTION_OK) { if (connectErrorTime == 0 || (time(0) - connectErrorTime > CONNECT_ERROR_THRESHOLD)) { Logger::getLogger()->error("Failed to connect to the database: %s", PQerrorMessage(dbConnection)); connectErrorTime = time(0); } throw 
runtime_error("Unable to connect to PostgreSQL database"); } logSQL("Set", "session time zone 'UTC' "); PGresult *res = PQexec(dbConnection, " set session time zone 'UTC' "); if (PQresultStatus(res) != PGRES_COMMAND_OK) { Logger::getLogger()->error("set session time zone failed: %s", PQerrorMessage(dbConnection)); } PQclear(res); } /** * Destructor for the database connection. Close the connection * to Postgres */ Connection::~Connection() { PQfinish(dbConnection); } /** * Perform a query against a common table * */ bool Connection::retrieve(const string& schema, const string& table, const string& condition, string& resultSet) { Document document; // Default template parameter uses UTF8 and MemoryPoolAllocator. SQLBuffer sql; SQLBuffer jsonConstraints; // Extra constraints to add to where clause vector<string> asset_codes; try { if (condition.empty()) { sql.append("SELECT * FROM "); sql.append(table); } else { if (document.Parse(condition.c_str()).HasParseError()) { raiseError("retrieve", "Failed to parse JSON payload"); return false; } if (document.HasMember("aggregate")) { sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } if (!jsonAggregates(document, document["aggregate"], sql, jsonConstraints, false)) { return false; } sql.append(" FROM "); } else if (document.HasMember("join")) { sql.append("SELECT "); selectColumns(document, sql, 0); } else if (document.HasMember("return")) { int col = 0; Value& columns = document["return"]; if (! 
columns.IsArray()) { raiseError("retrieve", "The property return must be an array"); return false; } sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr) { if (col) sql.append(", "); if (!itr->IsObject()) // Simple column name { sql.append("\""); sql.append(itr->GetString()); sql.append("\""); } else { if (itr->HasMember("column")) { if (! (*itr)["column"].IsString()) { raiseError("rerieve", "column must be a string"); return false; } if (itr->HasMember("format")) { if (! (*itr)["format"].IsString()) { raiseError("rerieve", "format must be a string"); return false; } sql.append("to_char("); sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(", '"); sql.append((*itr)["format"].GetString()); sql.append("')"); } else if (itr->HasMember("timezone")) { if (! (*itr)["timezone"].IsString()) { raiseError("rerieve", "timezone must be a string"); return false; } sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(" AT TIME ZONE '"); sql.append((*itr)["timezone"].GetString()); sql.append("' "); } else { sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); } sql.append(' '); } else if (itr->HasMember("json")) { const Value& json = (*itr)["json"]; if (! 
returnJson(json, sql, jsonConstraints)) return false; } else { raiseError("retrieve", "return object must have either a column or json property"); return false; } if (itr->HasMember("alias")) { sql.append(" AS \""); sql.append((*itr)["alias"].GetString()); sql.append('"'); } } col++; } sql.append(" FROM "); } else { sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } sql.append(" * FROM "); } if (document.HasMember("join")) { sql.append(" FROM "); sql.append(table); sql.append(" t0"); appendTables(schema, document, sql, 1); } else { sql.append(table); } if (document.HasMember("where")) { sql.append(" WHERE "); if (document.HasMember("join")) { if (!jsonWhereClause(document["where"], sql, asset_codes, false, "t0.")) { return false; } // Now and the join condition itself string col0, col1; const Value& join = document["join"]; if (join.HasMember("on") && join["on"].IsString()) { col0 = join["on"].GetString(); } else { raiseError("rerieve", "Missing on item"); return false; } if (join.HasMember("table")) { const Value& table = join["table"]; if (table.HasMember("column") && table["column"].IsString()) { col1 = table["column"].GetString(); } else { raiseError("QueryTable", "Missing column in join table"); return false; } } sql.append(" AND t0."); sql.append(col0); sql.append(" = t1."); sql.append(col1); sql.append(" "); if (join.HasMember("query") && join["query"].IsObject()) { sql.append("AND "); const Value& query = join["query"]; processJoinQueryWhereClause(query, sql, asset_codes, 1); } } else if (document.HasMember("where")) { if (!jsonWhereClause(document["where"], sql, asset_codes)) { return false; } } else { raiseError("retrieve", "JSON does not contain where clause"); return false; } if (! 
jsonConstraints.isEmpty()) { sql.append(" AND "); const char *jsonBuf = jsonConstraints.coalesce(); sql.append(jsonBuf); delete[] jsonBuf; } } if (!jsonModifiers(document, sql)) { return false; } } sql.append(';'); const char *query = sql.coalesce(); logSQL("CommonRetrieve", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_TUPLES_OK) { mapResultSet(res, resultSet); PQclear(res); return true; } char *SQLState = PQresultErrorField(res, PG_DIAG_SQLSTATE); if (!strcmp(SQLState, "22P02")) // Conversion error { raiseError("retrieve", "Unable to convert data to the required type"); } else { raiseError("retrieve", PQerrorMessage(dbConnection)); } PQclear(res); return false; } catch (exception e) { raiseError("retrieve", "Internal error: %s", e.what()); } return false; } /** * Perform a query against the readings table * */ bool Connection::retrieveReadings(const string& condition, string& resultSet) { Document document; // Default template parameter uses UTF8 and MemoryPoolAllocator. 
SQLBuffer sql; SQLBuffer jsonConstraints; // Extra constraints to add to where clause const string table = "readings"; try { if (condition.empty()) { const char *sql_cmd = R"( SELECT id, asset_code, reading, to_char(user_ts, ')" F_DATEH24_US R"(') as user_ts, to_char(ts, ')" F_DATEH24_US R"(') as ts FROM fledge.)"; sql.append(sql_cmd); sql.append(table); } else { if (document.Parse(condition.c_str()).HasParseError()) { raiseError("retrieve", "Failed to parse JSON payload"); return false; } // timebucket aggregate all datapoints if (aggregateAll(document)) { return aggregateQuery(document, resultSet); } if (document.HasMember("aggregate")) { sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } if (!jsonAggregates(document, document["aggregate"], sql, jsonConstraints, true)) { return false; } sql.append(" FROM fledge."); } else if (document.HasMember("return")) { int col = 0; Value& columns = document["return"]; if (! columns.IsArray()) { raiseError("retrieve", "The property return must be an array"); return false; } sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr) { if (col) sql.append(", "); if (!itr->IsObject()) // Simple column name { if (strcmp(itr->GetString() ,"user_ts") == 0) { // Display without TZ expression and microseconds also sql.append("to_char(user_ts, '" F_DATEH24_US "') as user_ts"); } else if (strcmp(itr->GetString() ,"ts") == 0) { // Display without TZ expression and microseconds also sql.append("to_char(ts, '" F_DATEH24_US "') as ts"); } else { sql.append("\""); sql.append(itr->GetString()); sql.append("\""); } } else { if (itr->HasMember("column")) { if (! (*itr)["column"].IsString()) { raiseError("rerieve", "column must be a string"); return false; } if (itr->HasMember("format")) { if (! 
(*itr)["format"].IsString()) { raiseError("rerieve", "format must be a string"); return false; } sql.append("to_char("); sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(", '"); sql.append((*itr)["format"].GetString()); sql.append("')"); } else if (itr->HasMember("timezone")) { if (! (*itr)["timezone"].IsString()) { raiseError("rerieve", "timezone must be a string"); return false; } sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(" AT TIME ZONE '"); sql.append((*itr)["timezone"].GetString()); sql.append("' "); // Use aliasing to avoid duplicate column name if (!itr->HasMember("alias")) { sql.append(" AS \""); sql.append((*itr)["column"].GetString()); sql.append("\""); } } else { if (strcmp((*itr)["column"].GetString() ,"user_ts") == 0) { // Display without TZ expression and microseconds also sql.append("to_char(user_ts, '" F_DATEH24_US "')"); if (! itr->HasMember("alias")) { sql.append(" AS \"user_ts\" "); } } else if (strcmp((*itr)["column"].GetString() ,"ts") == 0) { // Display without TZ expression and microseconds also sql.append("to_char(ts, '" F_DATEH24_US "')"); if (! itr->HasMember("alias")) { sql.append(" AS \"ts\" "); } } else { sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); } } sql.append(' '); } else if (itr->HasMember("json")) { const Value& json = (*itr)["json"]; if (! 
returnJson(json, sql, jsonConstraints)) return false; } else { raiseError("retrieve", "return object must have either a column or json property"); return false; } if (itr->HasMember("alias")) { sql.append(" AS \""); sql.append((*itr)["alias"].GetString()); sql.append('"'); } } col++; } sql.append(" FROM fledge."); } else { sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } const char *sql_cmd = R"( id, asset_code, reading, to_char(user_ts, ')" F_DATEH24_US R"(') as user_ts, to_char(ts, ')" F_DATEH24_US R"(') as ts FROM fledge.)"; sql.append(sql_cmd); } sql.append(table); if (document.HasMember("where")) { sql.append(" WHERE "); if (document.HasMember("where")) { vector<string> asset_codes; if (!jsonWhereClause(document["where"], sql, asset_codes)) { return false; } } else { raiseError("retrieve", "JSON does not contain where clause"); return false; } if (! jsonConstraints.isEmpty()) { sql.append(" AND "); const char *jsonBuf = jsonConstraints.coalesce(); sql.append(jsonBuf); delete[] jsonBuf; } } if (!jsonModifiers(document, sql)) { return false; } } sql.append(';'); const char *query = sql.coalesce(); logSQL("CommonRetrieve", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_TUPLES_OK) { mapResultSet(res, resultSet); PQclear(res); return true; } char *SQLState = PQresultErrorField(res, PG_DIAG_SQLSTATE); if (!strcmp(SQLState, "22P02")) // Conversion error { raiseError("retrieve", "Unable to convert data to the required type"); } else { raiseError("retrieve", PQerrorMessage(dbConnection)); } PQclear(res); return false; } catch (exception e) { raiseError("retrieve", "Internal error: %s", e.what()); } return false; } /** * Insert data into a table */ int Connection::insert(const std::string& table, const std::string& data) { SQLBuffer sql; Document document; ostringstream convert; std::size_t arr = data.find("inserts"); // Check first the 
'inserts' property in JSON data bool stdInsert = (arr == std::string::npos || arr > 8); // If input data is not an array of iserts // create an array with one element if (stdInsert) { convert << "{ \"inserts\" : [ "; convert << data; convert << " ] }"; } if (document.Parse(stdInsert ? convert.str().c_str() : data.c_str()).HasParseError()) { raiseError("insert", "Failed to parse JSON payload\n"); return -1; } // Get the array with row(s) Value &inserts = document["inserts"]; if (!inserts.IsArray()) { raiseError("insert", "Payload is missing the inserts array"); return -1; } // Number of inserts int ins = 0; // Iterate through insert array for (Value::ConstValueIterator iter = inserts.Begin(); iter != inserts.End(); ++iter) { if (!iter->IsObject()) { raiseError("insert", "Each entry in the insert array must be an object"); return -1; } int col = 0; SQLBuffer values; sql.append("INSERT INTO "); sql.append(table); sql.append(" ("); for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); itr != (*iter).MemberEnd(); ++itr) { if (itr->value.IsNull()) continue; // Append column name if (col) { sql.append(", "); } string field_name = double_quote_reserved_column_name(itr->name.GetString()); sql.append(field_name); // Append column value if (col) { values.append(", "); } if (itr->value.IsString()) { const char *str = itr->value.GetString(); // Check if the string is a function if (isFunction(str)) { values.append(str); } else { values.append('\''); values.append(escape(str)); values.append('\''); } } else if (itr->value.IsDouble()) values.append(itr->value.GetDouble()); else if (itr->value.IsNumber()) values.append(itr->value.GetInt()); else if (itr->value.IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); itr->value.Accept(writer); values.append('\''); values.append(escape(buffer.GetString())); values.append('\''); } col++; } sql.append(") VALUES ("); const char *vals = values.coalesce(); sql.append(vals); delete[] vals; sql.append(");"); // 
Increment row count ins++; } const char *query = sql.coalesce(); logSQL("CommonInsert", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_COMMAND_OK) { PQclear(res); return atoi(PQcmdTuples(res)); } raiseError("insert", PQerrorMessage(dbConnection)); PQclear(res); return -1; } /** * Perform an update against a common table * */ int Connection::update(const string& table, const string& payload) { // Default template parameter uses UTF8 and MemoryPoolAllocator. Document document; SQLBuffer sql; int row = 0; ostringstream convert; bool allowZero = false; std::size_t arr = payload.find("updates"); bool changeReqd = (arr == std::string::npos || arr > 8); if (changeReqd) { convert << "{ \"updates\" : [ "; convert << payload; convert << " ] }"; } if (document.Parse(changeReqd?convert.str().c_str():payload.c_str()).HasParseError()) { raiseError("update", "Failed to parse JSON payload"); return -1; } else { Value &updates = document["updates"]; if (!updates.IsArray()) { raiseError("update", "Payload is missing the updates array"); return -1; } int i=0; for (Value::ConstValueIterator iter = updates.Begin(); iter != updates.End(); ++iter,++i) { if (!iter->IsObject()) { raiseError("update", "Each entry in the update array must be an object"); return -1; } sql.append("UPDATE "); sql.append(table); sql.append(" SET "); int col = 0; if ((*iter).HasMember("values")) { const Value& values = (*iter)["values"]; for (Value::ConstMemberIterator itr = values.MemberBegin(); itr != values.MemberEnd(); ++itr) { if (col != 0) { sql.append( ", "); } sql.append("\""); sql.append(itr->name.GetString()); sql.append("\""); sql.append(" = "); if (itr->value.IsString()) { const char *str = itr->value.GetString(); // Check if the string is a function if (isFunction(str)) { sql.append(str); } else { sql.append('\''); sql.append(escape(str)); sql.append('\''); } } else if (itr->value.IsDouble()) sql.append(itr->value.GetDouble()); else if 
(itr->value.IsUint64()) sql.append((unsigned long)itr->value.GetUint64()); else if (itr->value.IsInt64()) sql.append((long)itr->value.GetInt64()); else if (itr->value.IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); itr->value.Accept(writer); sql.append('\''); sql.append(escape(buffer.GetString())); sql.append('\''); } // Handle JSON value null: "item" : null else if (itr->value.IsNull()) { sql.append("NULL"); } col++; } } if ((*iter).HasMember("expressions")) { const Value& exprs = (*iter)["expressions"]; if (!exprs.IsArray()) { raiseError("update", "The property exressions must be an array"); return -1; } for (Value::ConstValueIterator itr = exprs.Begin(); itr != exprs.End(); ++itr) { if (col != 0) { sql.append( ", "); } if (!itr->IsObject()) { raiseError("update", "expressions must be an array of objects"); return -1; } if (!itr->HasMember("column")) { raiseError("update", "Missing column property in expressions array item"); return -1; } if (!itr->HasMember("operator")) { raiseError("update", "Missing operator property in expressions array item"); return -1; } if (!itr->HasMember("value")) { raiseError("update", "Missing value property in expressions array item"); return -1; } sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(" = "); sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(' '); sql.append((*itr)["operator"].GetString()); sql.append(' '); const Value& value = (*itr)["value"]; if (value.IsString()) { const char *str = value.GetString(); // Check if the string is a function if (isFunction(str)) { sql.append(str); } else { sql.append('\''); sql.append(str); sql.append('\''); } } else if (value.IsDouble()) sql.append(value.GetDouble()); else if (value.IsUint64()) sql.append((unsigned long)value.GetUint64()); else if (value.IsInt64()) sql.append((long)value.GetInt64()); else if (value.IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); 
value.Accept(writer); sql.append('\''); sql.append(escape(buffer.GetString())); sql.append('\''); } col++; } } if ((*iter).HasMember("json_properties")) { const Value& exprs = (*iter)["json_properties"]; if (!exprs.IsArray()) { raiseError("update", "The property json_properties must be an array"); return -1; } for (Value::ConstValueIterator itr = exprs.Begin(); itr != exprs.End(); ++itr) { if (col != 0) { sql.append( ", "); } if (!itr->IsObject()) { raiseError("update", "json_properties must be an array of objects"); return -1; } if (!itr->HasMember("column")) { raiseError("update", "Missing column property in json_properties array item"); return -1; } if (!itr->HasMember("path")) { raiseError("update", "Missing path property in json_properties array item"); return -1; } if (!itr->HasMember("value")) { raiseError("update", "Missing value property in json_properties array item"); return -1; } sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(" = jsonb_set("); sql.append((*itr)["column"].GetString()); sql.append(", '{"); const Value& path = (*itr)["path"]; if (!path.IsArray()) { raiseError("update", "The property path must be an array"); return -1; } int pathElement = 0; for (Value::ConstValueIterator itr2 = path.Begin(); itr2 != path.End(); ++itr2) { if (pathElement > 0) { sql.append(','); } if (itr2->IsString()) { sql.append(itr2->GetString()); } else { raiseError("update", "The elements of path must all be strings"); return -1; } pathElement++; } sql.append("}', "); const Value& value = (*itr)["value"]; if (value.IsString()) { const char *str = value.GetString(); // Check if the string is a function if (isFunction(str)) { sql.append("'\""); sql.append(str); sql.append("\"'"); } else { StringBuffer buffer; Writer<StringBuffer> writer(buffer); value.Accept(writer); sql.append("'\""); sql.append(escape_double_quotes(escape(JSONunescape(buffer.GetString())))); sql.append("\"'"); } } else if (value.IsDouble()) { 
sql.append(value.GetDouble()); } else if (value.IsUint64()) { sql.append((unsigned long)value.GetUint64()); } else if (value.IsInt64()) { sql.append((long)value.GetInt64()); } else if (value.IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); value.Accept(writer); std::string buffer_escaped = "\""; buffer_escaped.append(escape_double_quotes(escape(buffer.GetString()))); buffer_escaped.append( "\""); sql.append('\''); sql.append(buffer_escaped); sql.append('\''); } sql.append(")"); col++; } } if (col == 0) { raiseError("update", "Missing values or expressions object in payload"); return -1; } if ((*iter).HasMember("condition")) { sql.append(" WHERE "); vector<string> asset_codes; if (!jsonWhereClause((*iter)["condition"], sql, asset_codes)) { return false; } } else if ((*iter).HasMember("where")) { vector<string> asset_codes; sql.append(" WHERE "); if (!jsonWhereClause((*iter)["where"], sql, asset_codes)) { return false; } } if (iter->HasMember("modifier") && (*iter)["modifier"].IsArray()) { const Value& modifier = (*iter)["modifier"]; for (Value::ConstValueIterator modifiers = modifier.Begin(); modifiers != modifier.End(); ++modifiers) { if (modifiers->IsString()) { string mod = modifiers->GetString(); if (mod.compare("allowzero") == 0) { allowZero = true; } } } } sql.append(';'); } } const char *query = sql.coalesce(); logSQL("CommonUpdate", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_COMMAND_OK) { int rowsUpdated = atoi(PQcmdTuples(res)); if (rowsUpdated == 0 && allowZero == false) { raiseError("update", "No rows where updated"); return -1; } PQclear(res); return atoi(PQcmdTuples(res)); } raiseError("update", PQerrorMessage(dbConnection)); PQclear(res); return -1; } /** * Perform a delete against a common table * */ int Connection::deleteRows(const string& table, const string& condition) { Document document; // Default template parameter uses UTF8 and MemoryPoolAllocator. 
SQLBuffer sql; sql.append("DELETE FROM "); sql.append(table); if (! condition.empty()) { sql.append(" WHERE "); if (document.Parse(condition.c_str()).HasParseError()) { raiseError("delete", "Failed to parse JSON payload"); return -1; } else { if (document.HasMember("where")) { vector<string> asset_codes; if (!jsonWhereClause(document["where"], sql, asset_codes)) { return -1; } } else { raiseError("delete", "JSON does not contain where clause"); return -1; } } } sql.append(';'); const char *query = sql.coalesce(); logSQL("CommonDelete", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_COMMAND_OK) { PQclear(res); return atoi(PQcmdTuples(res)); } raiseError("delete", PQerrorMessage(dbConnection)); PQclear(res); return -1; } /** * Format a date to a fixed format with milliseconds, microseconds and * timezone expressed, examples : * * case - formatted |2019-01-01 10:01:01.000000+00:00| date |2019-01-01 10:01:01| * case - formatted |2019-02-01 10:02:01.000000+00:00| date |2019-02-01 10:02:01.0| * case - formatted |2019-02-02 10:02:02.841000+00:00| date |2019-02-02 10:02:02.841| * case - formatted |2019-02-03 10:02:03.123456+00:00| date |2019-02-03 10:02:03.123456| * case - formatted |2019-03-01 10:03:01.100000+00:00| date |2019-03-01 10:03:01.1+00:00| * case - formatted |2019-03-02 10:03:02.123000+00:00| date |2019-03-02 10:03:02.123+00:00| * case - formatted |2019-03-03 10:03:03.123456+00:00| date |2019-03-03 10:03:03.123456+00:00| * case - formatted |2019-03-04 10:03:04.123456+01:00| date |2019-03-04 10:03:04.123456+01:00| * case - formatted |2019-03-05 10:03:05.123456-01:00| date |2019-03-05 10:03:05.123456-01:00| * case - formatted |2019-03-04 10:03:04.123456+02:30| date |2019-03-04 10:03:04.123456+02:30| * case - formatted |2019-03-05 10:03:05.123456-02:30| date |2019-03-05 10:03:05.123456-02:30| * * @param out false if the date is invalid * */ bool Connection::formatDate(char *formatted_date, size_t buffer_size, 
const char *date) { struct timeval tv = {0}; struct tm tm = {0}; char *valid_date = nullptr; // Extract up to seconds memset(&tm, 0, sizeof(tm)); valid_date = strptime(date, "%Y-%m-%d %H:%M:%S", &tm); if (! valid_date) { return (false); } strftime (formatted_date, buffer_size, "%Y-%m-%d %H:%M:%S", &tm); // Work out the microseconds from the fractional part of the seconds char fractional[10] = {0}; sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%[0-9]*", fractional); // Truncate to max 6 digits fractional[6] = 0; int multiplier = 6 - (int)strlen(fractional); if (multiplier < 0) multiplier = 0; while (multiplier--) strcat(fractional, "0"); strcat(formatted_date ,"."); strcat(formatted_date ,fractional); // Handles timezone char timezone_hour[5] = {0}; char timezone_min[5] = {0}; char sign[2] = {0}; sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%*d-%2[0-9]:%2[0-9]", timezone_hour, timezone_min); if (timezone_hour[0] != 0) { strcat(sign, "-"); } else { memset(timezone_hour, 0, sizeof(timezone_hour)); memset(timezone_min, 0, sizeof(timezone_min)); sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%*d+%2[0-9]:%2[0-9]", timezone_hour, timezone_min); if (timezone_hour[0] != 0) { strcat(sign, "+"); } else { // No timezone is expressed in the source date // the default UTC is added strcat(formatted_date, "+00:00"); } } if (sign[0] != 0) { if (timezone_hour[0] != 0) { strcat(formatted_date, sign); // Pad with 0 if an hour having only 1 digit was provided // +1 -> +01 if (strlen(timezone_hour) == 1) strcat(formatted_date, "0"); strcat(formatted_date, timezone_hour); strcat(formatted_date, ":"); } if (timezone_min[0] != 0) { strcat(formatted_date, timezone_min); // Pad with 0 if minutes having only 1 digit were provided // 3 -> 30 if (strlen(timezone_min) == 1) strcat(formatted_date, "0"); } else { // Minutes aren't expressed in the source date strcat(formatted_date, "00"); } } return (true); } /** * Append a set of readings to the readings table */ int Connection::appendReadings(const char *readings) { 
Document doc; SQLBuffer sql; int row = 0; bool add_row = false; ParseResult ok = doc.Parse(readings); if (!ok) { raiseError("appendReadings", GetParseError_En(doc.GetParseError())); return -1; } if (!doc.HasMember("readings")) { raiseError("appendReadings", "Payload is missing a readings array"); return -1; } Value &rdings = doc["readings"]; if (!rdings.IsArray()) { raiseError("appendReadings", "Payload is missing the readings array"); return -1; } const char *head = "INSERT INTO fledge.readings ( user_ts, asset_code, reading ) VALUES "; sql.append(head); int count = 0; for (Value::ConstValueIterator itr = rdings.Begin(); itr != rdings.End(); ++itr) { if (count == m_maxReadingRows) { sql.append(';'); const char *query = sql.coalesce(); logSQL("ReadingsAppend", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) != PGRES_COMMAND_OK) { raiseError("appendReadings", PQerrorMessage(dbConnection)); PQclear(res); return -1; } PQclear(res); sql.clear(); sql.append(head); count = 0; } if (!itr->IsObject()) { raiseError("appendReadings", "Each reading in the readings array must be an object"); return -1; } add_row = true; const char *asset_code = (*itr)["asset_code"].GetString(); if (strlen(asset_code) == 0) { Logger::getLogger()->warn("Postgres appendReadings - empty asset code value, row is ignored"); continue; } const char *str = (*itr)["user_ts"].GetString(); // Check if the string is a function if (isFunction(str)) { if (count) sql.append(", ("); else sql.append('('); sql.append(str); } else { char formatted_date[LEN_BUFFER_DATE] = {0}; if (! 
formatDate(formatted_date, sizeof(formatted_date), str) ) { raiseError("appendReadings", "Invalid date |%s|", str); add_row = false; } else { if (count) { sql.append(", ("); } else { sql.append('('); } sql.append('\''); sql.append(formatted_date); sql.append('\''); } } if (add_row) { row++; count++; // Handles - asset_code sql.append(",\'"); std::string escaped_asset(asset_code); std::string target ="'"; std::string replacement ="''"; StringReplaceAllEx(escaped_asset, target, replacement); sql.append(escaped_asset); sql.append("', '"); // Handles - reading StringBuffer buffer; Writer<StringBuffer> writer(buffer); (*itr)["reading"].Accept(writer); sql.append(escape(buffer.GetString())); sql.append("\' "); sql.append(')'); } } if (count == 0) { // No rows in final block return 0; } sql.append(';'); const char *query = sql.coalesce(); if (row > 0) { logSQL("ReadingsAppend", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_COMMAND_OK) { PQclear(res); return atoi(PQcmdTuples(res)); } raiseError("appendReadings", PQerrorMessage(dbConnection)); PQclear(res); return -1; } else { delete[] query; return 0; } } /** * Fetch a block of readings from the reading table */ bool Connection::fetchReadings(unsigned long id, unsigned int blksize, std::string& resultSet) { char sqlbuffer[200]; snprintf(sqlbuffer, sizeof(sqlbuffer), "SELECT id, asset_code, reading, user_ts AT TIME ZONE 'UTC' as \"user_ts\", ts AT TIME ZONE 'UTC' as \"ts\" FROM fledge.readings WHERE id >= %ld ORDER BY id LIMIT %d;", id, blksize); logSQL("ReadingsFetch", sqlbuffer); PGresult *res = PQexec(dbConnection, sqlbuffer); if (PQresultStatus(res) == PGRES_TUPLES_OK) { mapResultSet(res, resultSet); PQclear(res); return true; } raiseError("retrieve", PQerrorMessage(dbConnection)); PQclear(res); return false; } /** * Purge readings from the reading table */ unsigned int Connection::purgeReadings(unsigned long age, unsigned int flags, unsigned long sent, 
std::string& result) { unsigned long rowidLimit = 0, minrowidLimit = 0, maxrowidLimit = 0, rowidMin; string sqlCommand; SQLBuffer sql; long unsentPurged = 0; long unsentRetained = 0; long numReadings = 0; bool flag_retain; int blocks = 0; struct timeval startTv{}, endTv{}; const char *logSection="ReadingsPurgeByAge"; Logger *logger = Logger::getLogger(); flag_retain = false; if ( (flags & STORAGE_PURGE_RETAIN_ANY) || (flags & STORAGE_PURGE_RETAIN_ALL) ) { flag_retain = true; } Logger::getLogger()->debug("%s - flags :%X: flag_retain :%d: sent :%lu:", __FUNCTION__, flags, flag_retain, sent); // Prepare empty result result = "{ \"removed\" : 0, "; result += " \"unsentPurged\" : 0, "; result += " \"unsentRetained\" : 0, "; result += " \"readings\" : 0, "; result += " \"method\" : \"age\", "; result += " \"duration\" : 0 }"; logger->info("Purge starting..."); gettimeofday(&startTv, NULL); /* * We fetch the current rowid and limit the purge process to work on just * those rows present in the database when the purge process started. * This prevents us looping in the purge process if new readings become * eligible for purging at a rate that is faster than we can purge them. */ rowidLimit = purgeOperation("SELECT max(id) from fledge.readings;", logSection, "ReadingsPurgeByAge - phase 1, fetching maximum id", true); if (rowidLimit == -1) { return 0; } maxrowidLimit = rowidLimit; minrowidLimit = purgeOperation("SELECT min(id) from fledge.readings;", logSection, "ReadingsPurgeByAge - phase 1, fetching minimum id", true); if (minrowidLimit == -1) { return 0; } //### #########################################################################################: if (age == 0) { /* * An age of 0 means remove the oldest hours data. * So set age based on the data we have and continue. 
*/ sqlCommand = "SELECT round(extract(epoch FROM (now() - min(user_ts)))/360) FROM fledge.readings WHERE id <=" + to_string (rowidLimit) + ";"; age = purgeOperation(sqlCommand.c_str() , logSection, "ReadingsPurgeByAge - phase 1, calculating age", true); if (age == -1) { return 0; } } Logger::getLogger()->debug("%s - rowidLimit :%lu: maxrowidLimit :%lu: maxrowidLimit :%lu: age :%lu:", __FUNCTION__, rowidLimit, maxrowidLimit, minrowidLimit, age); { /* * Refine rowid limit to just those rows older than age hours. */ unsigned long l = minrowidLimit; unsigned long r; if (flag_retain) { r = min(sent, rowidLimit); } else { r = rowidLimit; } r = max(r, l); logger->debug ("%s - l=%u, r=%u, sent=%u, rowidLimit=%u, minrowidLimit=%u, flags=%u", __FUNCTION__, l, r, sent, rowidLimit, minrowidLimit, flags); if (l == r) { logger->info("V2 No data to purge: min_id == max_id == %u", minrowidLimit); return 0; } unsigned long m=l; while (l <= r) { unsigned long midRowId = 0; unsigned long prev_m = m; m = l + (r - l) / 2; if (prev_m == m) break; // e.g. 
select id from readings where rowid = 219867307 AND user_ts < datetime('now' , '-24 hours', 'utc'); sqlCommand = "SELECT id FROM fledge.readings WHERE id = " + to_string (m) + " AND user_ts < (now() - INTERVAL '" + to_string (age) + " hours');"; midRowId = purgeOperation(sqlCommand.c_str() , logSection, "ReadingsPurgeByAge - phase 2, fetching midRowId", true); if (midRowId == -1) { return 0; } if (midRowId == 0) // mid row doesn't satisfy given condition for user_ts, so discard right/later half and look in left/earlier half { // search in earlier/left half r = m - 1; // The m position should be skipped as midRowId is 0 m = r; } else //if (l != m) { // search in later/right half l = m + 1; } } rowidLimit = m; Logger::getLogger()->debug("%s - s1 rowidLimit :%lu: minrowidLimit :%lu: maxrowidLimit :%lu:", __FUNCTION__, rowidLimit, minrowidLimit, maxrowidLimit); sqlCommand = "SELECT max(id) FROM fledge.readings WHERE id <= " + to_string (rowidLimit) + " AND user_ts < (now() - INTERVAL '" + to_string (age) + " hours');"; rowidLimit = purgeOperation(sqlCommand.c_str() , logSection, "ReadingsPurgeByAge - phase 2, checking rowidLimit", true); if (rowidLimit == -1) { return 0; } Logger::getLogger()->debug("%s - s2 rowidLimit :%lu: minrowidLimit :%lu: maxrowidLimit :%lu:", __FUNCTION__, rowidLimit, minrowidLimit, maxrowidLimit); if (minrowidLimit == rowidLimit) { logger->info("No data to purge"); return 0; } rowidMin = minrowidLimit; Logger::getLogger()->debug("%s - m :%lu: rowidMin :%lu: ",__FUNCTION__ ,m, rowidMin); } if ( ! 
flag_retain )
	{
		// Not retaining unsent readings: work out how many unsent rows
		// the purge is about to remove so it can be reported back.
		unsigned long lastPurgedId;
		sqlCommand = "SELECT id FROM fledge.readings WHERE id = " + to_string (rowidLimit) + ";";
		lastPurgedId = purgeOperation(sqlCommand.c_str() , logSection, "ReadingsPurgeByAge - phase 2, fetching unsentPurged", true);
		if (lastPurgedId == -1)
		{
			return 0;
		}
		if (sent != 0 && lastPurgedId > sent)	// Unsent readings will be purged
		{
			// Get number of unsent rows we are about to remove
			unsentPurged = lastPurgedId - sent;
		}
		Logger::getLogger()->debug("%s - lastPurgedId :%d: unsentPurged :%ld:" ,__FUNCTION__, lastPurgedId, unsentPurged);
	}

	unsigned int deletedRows = 0;
	unsigned int rowsAffected, totTime=0, prevBlocks=0, prevTotTime=0;

	logger->info("Purge about to delete readings # %ld to %ld", rowidMin, rowidLimit);

	// Delete in blocks of purgeBlockSize rows so the table is never held
	// for too long in one statement; the block size is adapted below to
	// track a target per-block delete time.
	while (rowidMin < rowidLimit)
	{
		blocks++;
		rowidMin += purgeBlockSize;
		if (rowidMin > rowidLimit)
		{
			rowidMin = rowidLimit;
		}
		{
			sqlCommand = "DELETE FROM fledge.readings WHERE id <=" + to_string(rowidMin) + ";" ;
			START_TIME;
			rowsAffected = purgeOperation(sqlCommand.c_str() , logSection, "ReadingsPurgeByAge - phase 3, deleting readings", false);
			END_TIME;
			logger->debug("%s - DELETE sql :%s: rowsAffected :%ld:", __FUNCTION__, sqlCommand.c_str() ,rowsAffected);
			if (rowsAffected == -1)
			{
				return 0;
			}
			totTime += usecs;
			// Back off when an individual delete took a long time, to
			// give other database clients a chance to run.
			if(usecs>150000)
			{
				std::this_thread::sleep_for(std::chrono::milliseconds(100+usecs/10000));
			}
		}
		deletedRows += rowsAffected;
		logger->debug("Purge delete block #%d with %d readings", blocks, rowsAffected);

		// Periodically recalculate the delete block size so the average
		// block delete time tracks TARGET_PURGE_BLOCK_DEL_TIME.
		if(blocks % RECALC_PURGE_BLOCK_SIZE_NUM_BLOCKS == 0)
		{
			int prevAvg = prevTotTime/(prevBlocks?prevBlocks:1);
			int currAvg = (totTime-prevTotTime)/(blocks-prevBlocks);
			int avg = ((prevAvg?prevAvg:currAvg)*5 + currAvg*5) / 10; // 50% weightage for long term avg and 50% weightage for current avg
			prevBlocks = blocks;
			prevTotTime = totTime;
			int deviation = abs(avg - TARGET_PURGE_BLOCK_DEL_TIME);
			logger->debug("blocks=%d, totTime=%d usecs, prevAvg=%d usecs, currAvg=%d usecs, avg=%d usecs, TARGET_PURGE_BLOCK_DEL_TIME=%d usecs, deviation=%d usecs", blocks, totTime, prevAvg, currAvg, avg, TARGET_PURGE_BLOCK_DEL_TIME, deviation);
			if (deviation > TARGET_PURGE_BLOCK_DEL_TIME/10)
			{
				// Scale the block size by the ratio of target time to
				// observed time, clamped to [0.5, 2.0] per adjustment,
				// rounded to the configured granularity and bounded by
				// the min/max block sizes.
				float ratio = (float)TARGET_PURGE_BLOCK_DEL_TIME / (float)avg;
				if (ratio > 2.0) ratio = 2.0;
				if (ratio < 0.5) ratio = 0.5;
				purgeBlockSize = (float)purgeBlockSize * ratio;
				purgeBlockSize = purgeBlockSize / PURGE_BLOCK_SZ_GRANULARITY * PURGE_BLOCK_SZ_GRANULARITY;
				if (purgeBlockSize < MIN_PURGE_DELETE_BLOCK_SIZE)
					purgeBlockSize = MIN_PURGE_DELETE_BLOCK_SIZE;
				if (purgeBlockSize > MAX_PURGE_DELETE_BLOCK_SIZE)
					purgeBlockSize = MAX_PURGE_DELETE_BLOCK_SIZE;
				logger->debug("Changed purgeBlockSize to %d", purgeBlockSize);
			}
			std::this_thread::sleep_for(std::chrono::milliseconds(100));
		}
		//Logger::getLogger()->debug("Purge delete block #%d with %d readings", blocks, rowsAffected);
	}
	// NOTE(review): this looks like a leftover from an earlier do/while
	// form; the loop above only exits when the condition is false, so
	// this is a no-op empty statement.
	while (rowidMin < rowidLimit);

	logger->debug ("%s - sent=%u, minrowidLimit=%u, maxrowidLimit=%u, rowidLimit=%u deletedRows=%u", __FUNCTION__, sent, minrowidLimit, maxrowidLimit, rowidLimit, deletedRows);

	unsentRetained = maxrowidLimit - rowidLimit;
	numReadings = maxrowidLimit +1 - minrowidLimit - deletedRows;

	if (sent == 0)	// Special case when no north process is used
	{
		unsentPurged = deletedRows;
	}

	// Build the JSON summary of the purge returned to the caller
	ostringstream convert;
	unsigned long duration;

	gettimeofday(&endTv, NULL);
	duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec;

	convert << "{ \"removed\" : " << deletedRows << ", ";
	convert << " \"unsentPurged\" : " << unsentPurged << ", ";
	convert << " \"unsentRetained\" : " << unsentRetained << ", ";
	convert << " \"readings\" : " << numReadings << ", ";
	convert << " \"method\" : \"age\", ";
	convert << " \"duration\" : " << duration << " }";

	result = convert.str();

	duration = duration / 1000;	// milliseconds
	logger->info("Purge process complete in %d blocks in %ld milliseconds", blocks, duration);

	Logger::getLogger()->debug("%s - age :%lu: flag_retain :%x: sent :%lu: result :%s:", __FUNCTION__, age, flags,
flag_retain, result.c_str() ); return deletedRows; } /** * Execute a SQL command for the purge task */ unsigned long Connection::purgeOperation(const char *sql, const char *logSection, const char *phase, bool retrieve) { SQLBuffer sqlBuffer; const char *query; unsigned long value; PGresult *res; bool error; char *PGValue {}; error = false; value = 0; Logger::getLogger()->debug("%s - sql :%s: logSection :%s: phase :%s:", __FUNCTION__, sql, logSection, phase); sqlBuffer.append(sql); query = sqlBuffer.coalesce(); logSQL(logSection, query); res = PQexec(dbConnection, query); delete[] query; if (retrieve) { if (PQresultStatus(res) == PGRES_TUPLES_OK) { PGValue = PQgetvalue(res, 0, 0); if (PGValue) value = (unsigned long) atol(PGValue); } else { error = true; } } else { if (PQresultStatus(res) == PGRES_COMMAND_OK) { value = (unsigned long)atoi(PQcmdTuples(res)); } else { error = true; } } if (error) { raiseError(phase, PQerrorMessage(dbConnection)); value = -1; } PQclear(res); return value; } /** * Purge readings from the reading table leaving a number of rows equal to the parameter rows */ unsigned int Connection::purgeReadingsByRows(unsigned long rows, unsigned int flags, unsigned long sent, std::string& result) { unsigned long deletedRows = 0, unsentPurged = 0, unsentRetained = 0, numReadings = 0; unsigned long limit = 0; unsigned long rowcount, minId, maxId; unsigned long rowsAffectedLastComand; unsigned long deletePoint; struct timeval startTv, endTv; string sqlCommand; bool flag_retain; const char *logSection="ReadingsPurgeByRows"; Logger *logger = Logger::getLogger(); gettimeofday(&startTv, NULL); flag_retain = false; if ( (flags & STORAGE_PURGE_RETAIN_ANY) || (flags & STORAGE_PURGE_RETAIN_ALL) ) { flag_retain = true; } Logger::getLogger()->debug(" %s - flags :%X: flag_retain :%s: sent :%ld:", __FUNCTION__, flags, flag_retain ? 
"true" : "false", sent); logger->info("Purge by Rows called"); if (flag_retain) { limit = sent; logger->info("Sent is %lu", sent); } logger->info("Purge by Rows called with flag_retain %X, rows %lu, limit %lu", flag_retain, rows, limit); rowcount = purgeOperation("SELECT count(*) from fledge.readings;", logSection, "ReadingsPurgeByRows - phase 1, fetching row count", true); if (rowcount == -1) { return 0; } maxId = purgeOperation("SELECT max(id) from fledge.readings;", logSection, "ReadingsPurgeByRows - phase 1, fetching maximum id", true); if (maxId == -1) { return 0; } numReadings = rowcount; rowsAffectedLastComand = 0; deletedRows = 0; do { if (rowcount <= rows) { logger->info("Row count %d is less than required rows %d", rowcount, rows); break; } minId = purgeOperation("SELECT min(id) from fledge.readings;", logSection, "ReadingsPurgeByRows - phase 2, fetching minimum id", true); if (minId == -1) { return 0; } deletePoint = minId + min(100000UL, rows); if (maxId - deletePoint < rows || deletePoint > maxId) deletePoint = maxId - rows; // Do not delete if (flag_retain) { if (limit < deletePoint) { deletePoint = limit; } } { logger->info("RowCount %lu, Max Id %lu, min Id %lu, delete point %lu", rowcount, maxId, minId, deletePoint); sqlCommand = "DELETE FROM fledge.readings WHERE id <= " + to_string(deletePoint); rowsAffectedLastComand = purgeOperation(sqlCommand.c_str(), logSection, "ReadingsPurgeByRows - phase 2, deleting readings", false); if (rowsAffectedLastComand != -1) // No error occured { deletedRows += rowsAffectedLastComand; numReadings -= rowsAffectedLastComand; rowcount -= rowsAffectedLastComand; logger->debug("Deleted %lu rows", rowsAffectedLastComand); if (rowsAffectedLastComand == 0) { break; } if (limit == 0) { // We may purge unsent rows if (minId > sent) { // The entire block was unsent unsentPurged += rowsAffectedLastComand; } else if (minId < sent && deletePoint > sent) { // Only part of the block was unsent long unsentBlock = 
rowsAffectedLastComand - (sent - minId);
						unsentPurged += unsentBlock;
					}
				}
			}
		}
	} while (rowcount > rows);

	if (limit)
	{
		// Retention was in force: everything kept above the target row
		// count is unsent data that was retained
		unsentRetained = numReadings - rows;
	}

	gettimeofday(&endTv, NULL);
	unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec;

	// Build the JSON summary of the purge returned to the caller
	ostringstream convert;
	convert << "{ \"removed\" : " << deletedRows << ", ";
	convert << " \"unsentPurged\" : " << unsentPurged << ", ";
	convert << " \"unsentRetained\" : " << unsentRetained << ", ";
	convert << " \"readings\" : " << numReadings << ", ";
	convert << " \"method\" : \"rows\", ";
	convert << " \"duration\" : " << duration << " }";

	result = convert.str();

	Logger::getLogger()->debug("%s - Purge by Rows complete - rows :%lu: flag :%x: sent :%lu: numReadings :%lu: rowsAffected :%u: result :%s:", __FUNCTION__, rows, flags, sent, numReadings, rowsAffectedLastComand, result.c_str() );

	return deletedRows;
}

/**
 * Map a SQL result set to a JSON document
 */
void Connection::mapResultSet(PGresult *res, string& resultSet)
{
int	nFields, i, j;
Document doc;

	doc.SetObject();	// Create the JSON document
	Document::AllocatorType& allocator = doc.GetAllocator();
	nFields = PQnfields(res);	// No. of columns in resultset
	Value rows(kArrayType);	// Setup a rows array
	Value count;

	count.SetInt(PQntuples(res));	// Create the count
	doc.AddMember("count", count, allocator);

	// Iterate over the rows
	for (i = 0; i < PQntuples(res); i++)
	{
		Value row(kObjectType);	// Create a row
		for (j = 0; j < nFields; j++)
		{
			/**
			 * TODO Improve handling of Oid's
			 *
			 * Current OID detection is based on
			 *
			 * SELECT oid, typname FROM pg_type;
			 */
			/**
			 * If PQgetvalue() is pointer to an empty string,
			 * we assume that is a NULL and we return
			 * the "" value no matter the OID value
			 */
			if (!strlen(PQgetvalue(res, i, j)))
			{
				Value value("", allocator);
				Value name(PQfname(res, j), allocator);
				row.AddMember(name, value, allocator);

				// Get the next column
				continue;
			}

			/* PQgetvalue() has a value, check OID */
			Oid oid = PQftype(res, j);
			switch (oid)
			{
				case 3802: // JSON type hard coded in this example: jsonb
				{
					Document d;
					if (d.Parse(PQgetvalue(res, i, j)).HasParseError())
					{
						raiseError("resultSet", "Failed to parse: %s\n", PQgetvalue(res, i, j));
						continue;
					}
					// Deep copy the parsed document into the row
					Value value(d, allocator);
					Value name(PQfname(res, j), allocator);
					row.AddMember(name, value, allocator);
					break;
				}
				case 23:	//INT 4 bytes: int4
				{
					int32_t intVal = atoi(PQgetvalue(res, i, j));
					Value name(PQfname(res, j), allocator);
					row.AddMember(name, intVal, allocator);
					break;
				}
				case 21:	//SMALLINT 2 bytes: int2
				{
					int16_t intVal = (short)atoi(PQgetvalue(res, i, j));
					Value name(PQfname(res, j), allocator);
					row.AddMember(name, intVal, allocator);
					break;
				}
				case 20:	//BIG INT 8 bytes: int8
				{
					int64_t intVal = atol(PQgetvalue(res, i, j));
					Value name(PQfname(res, j), allocator);
					row.AddMember(name, intVal, allocator);
					break;
				}
				case 700: // float4
				case 701: // float8
				case 710: // this OID doesn't exist
				{
					double dblVal = atof(PQgetvalue(res, i, j));
					Value name(PQfname(res, j), allocator);
					row.AddMember(name, dblVal, allocator);
					break;
				}
				case 1184: // Timestamp: timestamptz
				{
					char *str = PQgetvalue(res, i, j);
					Value value(str, allocator);
					Value
name(PQfname(res, j), allocator); row.AddMember(name, value, allocator); break; } default: { char *str = PQgetvalue(res, i, j); if (oid == 1042) // char(x) rather than varchar so trim white space { str = trim(str); } Value value(str, allocator); Value name(PQfname(res, j), allocator); row.AddMember(name, value, allocator); break; } } } rows.PushBack(row, allocator); // Add the row } doc.AddMember("rows", rows, allocator); // Add the rows to the JSON /* Write out the JSON document we created */ StringBuffer buffer; Writer<StringBuffer> writer(buffer); doc.Accept(writer); resultSet = buffer.GetString(); } /** * Process the aggregate options and return the columns to be selected * * @param payload To evaluate for the generation of the SQLcommands * @param aggregates To evaluate for the generation of the SQL commands * @param jsonConstraint To evaluate for the generation of the SQL commands * @param isTableReading True if the handled table is the readings for which * a specific format should be applied * @param sql The sql commands relates to payload, aggregates * and jsonConstraint * */ bool Connection::jsonAggregates(const Value& payload, const Value& aggregates, SQLBuffer& sql, SQLBuffer& jsonConstraint, bool isTableReading) { if (aggregates.IsObject()) { if (! aggregates.HasMember("operation")) { raiseError("Select aggregation", "Missing property \"operation\""); return false; } if ((! aggregates.HasMember("column")) && (! 
aggregates.HasMember("json"))) { raiseError("Select aggregation", "Missing property \"column\" or \"json\""); return false; } string column_name = aggregates["column"].GetString(); sql.append(aggregates["operation"].GetString()); sql.append('('); if (aggregates.HasMember("column")) { if (strcmp(aggregates["operation"].GetString(), "count") != 0) { // an operation different from the 'count' is requested if (isTableReading && (column_name.compare("user_ts") == 0) ) { sql.append("to_char(user_ts, '" F_DATEH24_US "')"); } else { sql.append("\""); sql.append(column_name); sql.append("\""); } } else { // 'count' operation is requested sql.append(column_name); } } else if (aggregates.HasMember("json")) { const Value& json = aggregates["json"]; if (! json.IsObject()) { raiseError("Select aggregation", "The json property must be an object"); return false; } if (!json.HasMember("column")) { raiseError("retrieve", "The json property is missing a column property"); return false; } sql.append('('); sql.append("\""); sql.append(json["column"].GetString()); sql.append("\""); sql.append("->"); if (!json.HasMember("properties")) { raiseError("retrieve", "The json property is missing a properties property"); return false; } const Value& jsonFields = json["properties"]; if (jsonFields.IsArray()) { if (! jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } jsonConstraint.append(json["column"].GetString()); int field = 0; string prev; for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr) { if (field) { sql.append("->>"); } if (prev.length() > 0) { jsonConstraint.append("->>'"); jsonConstraint.append(prev); jsonConstraint.append("'"); } prev = itr->GetString(); field++; sql.append('\''); sql.append(itr->GetString()); sql.append('\''); } jsonConstraint.append(" ? '"); jsonConstraint.append(prev); jsonConstraint.append("'"); } else { sql.append('\''); sql.append(jsonFields.GetString()); sql.append('\''); if (! 
jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(" ? '"); jsonConstraint.append(jsonFields.GetString()); jsonConstraint.append("'"); } sql.append(")::float"); } sql.append(") AS \""); if (aggregates.HasMember("alias")) { sql.append(aggregates["alias"].GetString()); } else { sql.append(aggregates["operation"].GetString()); sql.append('_'); sql.append(aggregates["column"].GetString()); } sql.append("\""); } else if (aggregates.IsArray()) { int index = 0; for (Value::ConstValueIterator itr = aggregates.Begin(); itr != aggregates.End(); ++itr) { if (!itr->IsObject()) { raiseError("select aggregation", "Each element in the aggregate array must be an object"); return false; } if ((! itr->HasMember("column")) && (! itr->HasMember("json"))) { raiseError("Select aggregation", "Missing property \"column\""); return false; } if (! itr->HasMember("operation")) { raiseError("Select aggregation", "Missing property \"operation\""); return false; } if (index) sql.append(", "); index++; sql.append((*itr)["operation"].GetString()); sql.append('('); if (itr->HasMember("column")) { string column_name= (*itr)["column"].GetString(); if (isTableReading && (column_name.compare("user_ts") == 0) ) { sql.append("to_char(user_ts, '" F_DATEH24_US "')"); } else { sql.append("\""); sql.append(column_name); sql.append("\""); } } else if (itr->HasMember("json")) { const Value& json = (*itr)["json"]; if (! 
json.IsObject()) { raiseError("Select aggregation", "The json property must be an object"); return false; } if (!json.HasMember("column")) { raiseError("retrieve", "The json property is missing a column property"); return false; } sql.append("CASE WHEN jsonb_typeof("); sql.append("\""); sql.append(json["column"].GetString()); sql.append("\""); if (!json.HasMember("properties")) { raiseError("retrieve", "The json property is missing a properties property"); return false; } const Value& jsonFields = json["properties"]; if (! jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } jsonConstraint.append(json["column"].GetString()); if (jsonFields.IsArray()) { string prev; for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr) { if (prev.length() > 0) { jsonConstraint.append("->>'"); jsonConstraint.append(prev); jsonConstraint.append("'"); } prev = itr->GetString(); sql.append("->>'"); sql.append(itr->GetString()); sql.append('\''); } jsonConstraint.append(" ? '"); jsonConstraint.append(prev); jsonConstraint.append("'"); } else { sql.append("->'"); sql.append(jsonFields.GetString()); sql.append('\''); sql.append(") != 'number' THEN 0 ELSE ("); sql.append("\""); sql.append(json["column"].GetString()); sql.append("\""); sql.append("->>'"); sql.append(jsonFields.GetString()); sql.append('\''); jsonConstraint.append(" ? 
'"); jsonConstraint.append(jsonFields.GetString()); jsonConstraint.append("'"); sql.append(")::float"); } } sql.append(" END) AS \""); if (itr->HasMember("alias")) { sql.append((*itr)["alias"].GetString()); } else { sql.append((*itr)["operation"].GetString()); sql.append('_'); sql.append((*itr)["column"].GetString()); } sql.append("\""); } } if (payload.HasMember("group")) { sql.append(", "); if (payload["group"].IsObject()) { const Value& grp = payload["group"]; if (grp.HasMember("format")) { sql.append("to_char("); sql.append("\""); sql.append(grp["column"].GetString()); sql.append("\""); sql.append(", '"); sql.append(grp["format"].GetString()); sql.append("')"); } else { sql.append("\""); sql.append(grp["column"].GetString()); sql.append("\""); } if (grp.HasMember("alias")) { sql.append(" AS \""); sql.append(grp["alias"].GetString()); sql.append("\""); } else { sql.append(" AS \""); sql.append(grp["column"].GetString()); sql.append("\""); } } else { // Double quotes commented to allow a group by of the type : date(history_ts), key //sql.append("\""); sql.append(payload["group"].GetString()); //sql.append("\""); } } if (payload.HasMember("timebucket")) { const Value& tb = payload["timebucket"]; if (! tb.IsObject()) { raiseError("Select data", "The \"timebucket\" property must be an object"); return false; } if (! 
tb.HasMember("timestamp")) { raiseError("Select data", "The \"timebucket\" object must have a timestamp property"); return false; } if (tb.HasMember("format")) { sql.append(", to_char(to_timestamp("); } else { sql.append(", to_timestamp("); } if (tb.HasMember("size")) { sql.append(tb["size"].GetString()); sql.append(" * "); } sql.append("floor(extract(epoch from "); sql.append(tb["timestamp"].GetString()); sql.append(") / "); if (tb.HasMember("size")) { sql.append(tb["size"].GetString()); } else { sql.append(1); } sql.append("))"); if (tb.HasMember("format")) { sql.append(", '"); sql.append(tb["format"].GetString()); sql.append("')"); } sql.append(" AS \""); if (tb.HasMember("alias")) { sql.append(tb["alias"].GetString()); } else { sql.append("timestamp"); } sql.append('"'); } return true; } /** * Process the modifers for limit, skip, sort and group */ bool Connection::jsonModifiers(const Value& payload, SQLBuffer& sql) { if (payload.HasMember("timebucket") && payload.HasMember("sort")) { raiseError("query modifiers", "Sort and timebucket modifiers can not be used in the same payload"); return false; } // Count columns unsigned int nAggregates = 0; if (payload.HasMember("aggregate") && payload["aggregate"].IsArray()) { nAggregates = payload["aggregate"].Size(); } string groupColumn; if (payload.HasMember("group")) { sql.append(" GROUP BY "); if (payload["group"].IsObject()) { const Value& grp = payload["group"]; if (grp.HasMember("format")) { sql.append("to_char("); sql.append("\""); sql.append(grp["column"].GetString()); sql.append("\""); sql.append(", '"); sql.append(grp["format"].GetString()); sql.append("')"); // Get the column name in GROUP BY groupColumn = grp["column"].GetString(); } } else { // Double quotes commented to allow a group by of the type : date(history_ts), key //sql.append("\""); sql.append(payload["group"].GetString()); //sql.append("\""); // Get the column name in GROUP BY groupColumn = payload["group"].GetString(); } } if 
(payload.HasMember("sort")) { sql.append(" ORDER BY "); const Value& sortBy = payload["sort"]; if (sortBy.IsObject()) { if (! sortBy.HasMember("column")) { raiseError("Select sort", "Missing property \"column\""); return false; } // Check wether column name in GROUP BY is the same // of column name in ORDER BY if (!groupColumn.empty() && groupColumn.compare(sortBy["column"].GetString()) == 0 && nAggregates) { // Note that the GROUP BY column is added as last one // in the column names for SELECT // The ORDER BY column name is now replaced by a column // number, without double quotes // The column number is nAggregates + 1 // Example: SELECT MIN(id), MAX(id), AVG(id) .. // nAggregates value is 3 // Final SQL statement is: SELECT ... ORDER BY 4 sql.append(nAggregates + 1); } else { sql.append("\""); sql.append(sortBy["column"].GetString()); sql.append("\""); } sql.append(' '); if (! sortBy.HasMember("direction")) { sql.append("ASC"); } else { sql.append(sortBy["direction"].GetString()); } } else if (sortBy.IsArray()) { int index = 0; for (Value::ConstValueIterator itr = sortBy.Begin(); itr != sortBy.End(); ++itr) { if (!itr->IsObject()) { raiseError("select sort", "Each element in the sort array must be an object"); return false; } if (! itr->HasMember("column")) { raiseError("Select sort", "Missing property \"column\""); return false; } if (index) sql.append(", "); index++; sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(' '); if (! itr->HasMember("direction")) { sql.append("ASC"); } else { sql.append((*itr)["direction"].GetString()); } } } } if (payload.HasMember("timebucket")) { const Value& tb = payload["timebucket"]; if (! tb.IsObject()) { raiseError("Select data", "The \"timebucket\" property must be an object"); return false; } if (! 
tb.HasMember("timestamp")) { raiseError("Select data", "The \"timebucket\" object must have a timestamp property"); return false; } if (payload.HasMember("group")) { sql.append(", "); } else { sql.append(" GROUP BY "); } sql.append("floor(extract(epoch from "); sql.append(tb["timestamp"].GetString()); sql.append(") / "); if (tb.HasMember("size")) { sql.append(tb["size"].GetString()); } else { sql.append(1); } sql.append(") ORDER BY "); sql.append("floor(extract(epoch from "); sql.append(tb["timestamp"].GetString()); sql.append(") / "); if (tb.HasMember("size")) { sql.append(tb["size"].GetString()); } else { sql.append(1); } sql.append(") DESC"); } if (payload.HasMember("skip")) { if (!payload["skip"].IsInt()) { raiseError("skip", "Skip must be specfied as an integer"); return false; } sql.append(" OFFSET "); sql.append(payload["skip"].GetInt()); } if (payload.HasMember("limit")) { if (!payload["limit"].IsInt()) { raiseError("limit", "Limit must be specfied as an integer"); return false; } sql.append(" LIMIT "); try { sql.append(payload["limit"].GetInt()); } catch (exception e) { raiseError("limit", "Bad value for limit parameter: %s", e.what()); return false; } } return true; } /** * Convert a JSON where clause into a PostresSQL where clause * */ bool Connection::jsonWhereClause(const Value& whereClause, SQLBuffer& sql, vector<string> &asset_codes, bool convertLocaltime, // not in use const string prefix) { if (!whereClause.IsObject()) { raiseError("where clause", "The \"where\" property must be a JSON object"); return false; } if (!whereClause.HasMember("column")) { raiseError("where clause", "The \"where\" object is missing a \"column\" property"); return false; } if (!whereClause.HasMember("condition")) { raiseError("where clause", "The \"where\" object is missing a \"condition\" property"); return false; } // Handle WHERE 1 = 1, 0.55 = 0.55 etc string whereColumnName = whereClause["column"].GetString(); char* p; double converted = 
strtod(whereColumnName.c_str(), &p); if (*p) { // Double quote column name if (prefix.empty()) { sql.append("\""); } // Add prefix if (!prefix.empty()) { sql.append(prefix); } sql.append(whereColumnName); // Double quote column name if (prefix.empty()) { sql.append("\""); } } else { // Use numeric value sql.append(whereColumnName); } sql.append(' '); string cond = whereClause["condition"].GetString(); if (cond.compare("isnull") == 0) { sql.append("isnull "); } else if (cond.compare("notnull") == 0) { sql.append("notnull "); } else { if (!whereClause.HasMember("value")) { raiseError("where clause", "The \"where\" object is missing a \"value\" property"); return false; } if (!cond.compare("older")) { if (!whereClause["value"].IsInt()) { raiseError("where clause", "The \"value\" of an \"older\" condition must be an integer"); return false; } sql.append("< now() - INTERVAL '"); sql.append(whereClause["value"].GetInt()); sql.append(" seconds'"); } else if (!cond.compare("newer")) { if (!whereClause["value"].IsInt()) { raiseError("where clause", "The \"value\" of an \"newer\" condition must be an integer"); return false; } sql.append("> now() - INTERVAL '"); sql.append(whereClause["value"].GetInt()); sql.append(" seconds'"); } else if (!cond.compare("in") || !cond.compare("not in")) { // Check we have a non empty array if (whereClause["value"].IsArray() && whereClause["value"].Size()) { sql.append(cond); sql.append(" ( "); int field = 0; for (Value::ConstValueIterator itr = whereClause["value"].Begin(); itr != whereClause["value"].End(); ++itr) { if (field) { sql.append(", "); } field++; if (itr->IsNumber()) { if (itr->IsInt()) { sql.append(itr->GetInt()); } else if (itr->IsInt64()) { sql.append((long)itr->GetInt64()); } else { sql.append(itr->GetDouble()); } } else if (itr->IsString()) { sql.append('\''); sql.append(escape(itr->GetString())); sql.append('\''); } else { string message("The \"value\" of a \"" + \ cond + \ "\" condition array element must be " \ "a string, 
integer or double."); raiseError("where clause", message.c_str()); return false; } } sql.append(" )"); } else { string message("The \"value\" of a \"" + \ cond + "\" condition must be an array " \ "and must not be empty."); raiseError("where clause", message.c_str()); return false; } } else { sql.append(cond); sql.append(' '); if (whereClause["value"].IsInt()) { sql.append(whereClause["value"].GetInt()); } else if (whereClause["value"].IsString()) { sql.append('\''); string value = whereClause["value"].GetString(); sql.append(escape(value)); sql.append('\''); // Identify a specific operation to restrinct the tables involved if (whereColumnName.compare("asset_code") == 0) { if ( cond.compare("=") == 0) { asset_codes.push_back(value); } } } } } if (whereClause.HasMember("and")) { sql.append(" AND "); vector<string> asset_codes; if (!jsonWhereClause(whereClause["and"], sql, asset_codes, false, prefix)) { return false; } } if (whereClause.HasMember("or")) { vector<string> asset_codes; sql.append(" OR "); if (!jsonWhereClause(whereClause["or"], sql, asset_codes, false, prefix)) { return false; } } return true; } bool Connection::returnJson(const Value& json, SQLBuffer& sql, SQLBuffer& jsonConstraint) { if (! json.IsObject()) { raiseError("retrieve", "The json property must be an object"); return false; } if (!json.HasMember("column")) { raiseError("retrieve", "The json property is missing a column property"); return false; } sql.append(json["column"].GetString()); sql.append("->"); if (!json.HasMember("properties")) { raiseError("retrieve", "The json property is missing a properties property"); return false; } const Value& jsonFields = json["properties"]; if (jsonFields.IsArray()) { if (! 
jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } jsonConstraint.append(json["column"].GetString()); int field = 0; string prev; for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr) { if (field) { sql.append("->"); } if (prev.length()) { jsonConstraint.append("->'"); jsonConstraint.append(prev); jsonConstraint.append('\''); } field++; sql.append('\''); sql.append(itr->GetString()); sql.append('\''); prev = itr->GetString(); } jsonConstraint.append(" ? '"); jsonConstraint.append(prev); jsonConstraint.append("'"); } else { sql.append('\''); sql.append(jsonFields.GetString()); sql.append('\''); if (! jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(" ? '"); jsonConstraint.append(jsonFields.GetString()); jsonConstraint.append("'"); } return true; } /** * Remove whitespace at both ends of a string */ char *Connection::trim(char *str) { char *ptr; while (*str && *str == ' ') str++; ptr = str + strlen(str) - 1; while (ptr > str && *ptr == ' ') { *ptr = 0; ptr--; } return str; } /** * Raise an error to return from the plugin */ void Connection::raiseError(const char *operation, const char *reason, ...) 
{ ConnectionManager *manager = ConnectionManager::getInstance(); char tmpbuf[512]; va_list ap; va_start(ap, reason); vsnprintf(tmpbuf, sizeof(tmpbuf), reason, ap); va_end(ap); Logger::getLogger()->error("PostgreSQL storage plugin raising error: %s", tmpbuf); manager->setError(operation, tmpbuf, false); } /** * Return the sie of a given table in bytes */ long Connection::tableSize(const string& table) { SQLBuffer buf; buf.append("SELECT pg_total_relation_size(relid) FROM pg_catalog.pg_statio_user_tables WHERE relname = '"); buf.append(table); buf.append("'"); const char *query = buf.coalesce(); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_TUPLES_OK) { long tSize = atol(PQgetvalue(res, 0, 0)); PQclear(res); return tSize; } raiseError("tableSize", PQerrorMessage(dbConnection)); PQclear(res); return -1; } /** * Add double quotes for words that are reserved as a column name * Sample : user to "user" * * @param column_name Column name to be evaluated * @param out Final name of the column */ const string Connection::double_quote_reserved_column_name(const string &column_name) { string final_column_name; if ( std::find(pg_column_reserved_words.begin(), pg_column_reserved_words.end(), column_name) != pg_column_reserved_words.end() ) { final_column_name = "\"" + column_name + "\""; } else { final_column_name = column_name; } return(final_column_name); } /** * Converts the input string quoting the double quotes : " to \" * * @param str String to convert * @param out Converted string */ const string Connection::escape_double_quotes(const string& str) { char *buffer; const char *p1; char *p2; string newString; if (str.find_first_of('\"') == string::npos) { return str; } buffer = (char *)malloc(str.length() * 2); p1 = str.c_str(); p2 = buffer; while (*p1) { if (*p1 == '\"') { *p2++ = '\\'; *p2++ = '\"'; p1++; } else if (*p1 == '\\' ) // Take care of previously escaped quotes { *p2++ = '\\'; *p2++ = '\\'; p1++; } else { *p2++ = 
*p1++; } } *p2 = 0; newString = string(buffer); free(buffer); return newString; } const string Connection::escape(const string& str) { char *buffer; const char *p1; char *p2; string newString; if (str.find_first_of('\'') == string::npos) { return str; } buffer = (char *)malloc(str.length() * 2); p1 = str.c_str(); p2 = buffer; while (*p1) { if (*p1 == '\'') { *p2++ = '\''; *p2++ = '\''; p1++; } else { *p2++ = *p1++; } } *p2 = 0; newString = string(buffer); free(buffer); return newString; } /** * Optionally log SQL statement execution * * @param tag A string tag that says why the SQL is being executed * @param stmt The SQL statement itself */ void Connection::logSQL(const char *tag, const char *stmt) { if (m_logSQL) { Logger::getLogger()->info("%s: %s", tag, stmt); } } /** * Create snapshot of a common table * * @param table The table to snapshot * @param id The snapshot id * @return -1 on error, >= 0 on success * * The new created table name has the name: * $table_snap$id */ int Connection::create_table_snapshot(const string& table, const string& id) { string query = "SELECT * INTO TABLE fledge."; query += table + "_snap" + id + " FROM fledge." + table; logSQL("CreateTableSnapshot", query.c_str()); PGresult *res = PQexec(dbConnection, query.c_str()); if (PQresultStatus(res) == PGRES_COMMAND_OK) { PQclear(res); return 1; } raiseError("create_table_snapshot", PQerrorMessage(dbConnection)); PQclear(res); return -1; } /** * Set the contents of a common table from a snapshot * * @param table The table to fill * @param id The snapshot id of the table * @return -1 on error, >= 0 on success * */ int Connection::load_table_snapshot(const string& table, const string& id) { string purgeQuery = "DELETE FROM fledge." + table; string query = "START TRANSACTION; " + purgeQuery; query += "; INSERT INTO fledge." + table; query += " SELECT * FROM fledge." 
+ table + "_snap" + id; query += "; COMMIT;"; logSQL("LoadTableSnapshot", query.c_str()); PGresult *res = PQexec(dbConnection, query.c_str()); if (PQresultStatus(res) == PGRES_COMMAND_OK) { PQclear(res); return 1; } else { PGresult *resRollback = PQexec(dbConnection, "ROLLBACK;"); if (PQresultStatus(resRollback) != PGRES_COMMAND_OK) { raiseError(" rollback load_table_snapshot", PQerrorMessage(dbConnection)); } PQclear(resRollback); } raiseError("load_table_snapshot", PQerrorMessage(dbConnection)); PQclear(res); return -1; } /** * Delete a snapshot of a common table * * @param table The table to snapshot * @param id The snapshot id * @return -1 on error, >= 0 on success */ int Connection::delete_table_snapshot(const string& table, const string& id) { string query = "DROP TABLE fledge." + table + "_snap" + id; logSQL("DeleteTableSnapshot", query.c_str()); PGresult *res = PQexec(dbConnection, query.c_str()); if (PQresultStatus(res) == PGRES_COMMAND_OK) { PQclear(res); return 1; } raiseError("delete_table_snapshot", PQerrorMessage(dbConnection)); PQclear(res); return -1; } /** * Get list of snapshots for a given common table * * @param table The given table name * @param resultSet Output data buffer * @return True on success, false on database errors */ bool Connection::get_table_snapshots(const string& table, string& resultSet) { SQLBuffer sql; try { sql.append("SELECT REPLACE(table_name, '"); sql.append(table); sql.append("_snap', '') AS id FROM information_schema.tables "); sql.append("WHERE table_schema = 'fledge' AND table_name LIKE '"); sql.append(table); sql.append("_snap%';"); const char *query = sql.coalesce(); logSQL("GetTableSnapshots", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_TUPLES_OK) { mapResultSet(res, resultSet); PQclear(res); return true; } char *SQLState = PQresultErrorField(res, PG_DIAG_SQLSTATE); if (!strcmp(SQLState, "22P02")) // Conversion error { raiseError("get_table_snapshots", 
"Unable to convert data to the required type"); } else { raiseError("get_table_snapshots", PQerrorMessage(dbConnection)); } PQclear(res); return false; } catch (exception e) { raiseError("get_table_snapshots", "Internal error: %s", e.what()); } return false; } /** * Check to see if the str is a function * * @param str The string to check * @return true if the string contains a function call */ bool Connection::isFunction(const char *str) const { return strcmp(str, "now()") == 0; } /** * In the case of a join add the columns to select from for all the tables in * the join * * @param document The query we are processing * @param sql The SQLBuffer we are writing * @param level The table number we are processing */ bool Connection::selectColumns(const Value& document, SQLBuffer& sql, int level) { SQLBuffer jsonConstraints; string tag = "t" + to_string(level) + "."; if (document.HasMember("return")) { int col = 0; const Value& columns = document["return"]; if (! columns.IsArray()) { raiseError("retrieve", "The property return must be an array"); return false; } if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr) { if (col) sql.append(", "); if (!itr->IsObject()) // Simple column name { sql.append(tag); sql.append(itr->GetString()); } else { if (itr->HasMember("column")) { if (! (*itr)["column"].IsString()) { raiseError("rerieve", "column must be a string"); return false; } if (itr->HasMember("format")) { if (! (*itr)["format"].IsString()) { raiseError("rerieve", "format must be a string"); return false; } sql.append("to_char("); sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(", '"); sql.append((*itr)["format"].GetString()); sql.append("')"); } else if (itr->HasMember("timezone")) { if (! 
(*itr)["timezone"].IsString()) { raiseError("rerieve", "timezone must be a string"); return false; } sql.append("\""); sql.append((*itr)["column"].GetString()); sql.append("\""); sql.append(" AT TIME ZONE '"); sql.append((*itr)["timezone"].GetString()); sql.append("' "); } else { sql.append(tag); sql.append((*itr)["column"].GetString()); } sql.append(' '); } else if (itr->HasMember("json")) { const Value& json = (*itr)["json"]; if (! returnJson(json, sql, jsonConstraints)) return false; } else { raiseError("retrieve", "return object must have either a column or json property"); return false; } if (itr->HasMember("alias")) { sql.append(" AS \""); sql.append((*itr)["alias"].GetString()); sql.append('"'); } } col++; } } else { sql.append('*'); return true; } if (document.HasMember("join")) { const Value& join = document["join"]; if (join.HasMember("query")) { const Value& query = join["query"]; sql.append(", "); if (!selectColumns(query, sql, ++level)) { raiseError("commonRetrieve", "Join failed to add select columns"); return false; } } } return true; } /** * In the case of a join add the tables to select from for all the tables in * the join * * @param document The query we are processing * @param sql The SQLBuffer we are writing * @param level The table number we are processing */ bool Connection::appendTables(const string& schema, const Value& document, SQLBuffer& sql, int level) { string tag = "t" + to_string(level); if (document.HasMember("join")) { const Value& join = document["join"]; if (join.HasMember("table")) { const Value& table = join["table"]; if (!table.HasMember("name")) { raiseError("commonRetrieve", "Joining table is missing a table name"); return false; } const Value& name = table["name"]; if (!name.IsString()) { raiseError("commonRetrieve", "Joining table name is not a string"); return false; } sql.append(", "); sql.append(schema); sql.append('.'); sql.append(name.GetString()); sql.append(" "); sql.append(tag); if (join.HasMember("query")) { const 
Value& query = join["query"]; appendTables(schema, query, sql, ++level); } else { raiseError("commonRetrieve", "Join is missing a join query definition"); return false; } } else { raiseError("commonRetrieve", "Join is missing a table definition"); return false; } } return true; } /** * Recurse down and add the where cluase and join terms for each * new table joined to the query * * @param query The JSON query * @param sql The SQLBuffer we are writing the data to * @param asset_codes The asset codes * @param level The nesting level of the joined table */ bool Connection::processJoinQueryWhereClause(const Value& query, SQLBuffer& sql, std::vector<std::string> &asset_codes, int level) { string tag = "t" + to_string(level) + "."; if (!jsonWhereClause(query["where"], sql, asset_codes, false, tag)) { return false; } if (query.HasMember("join")) { // Now and the join condition itself string col0, col1; const Value& join = query["join"]; if (join.HasMember("on") && join["on"].IsString()) { col0 = join["on"].GetString(); } else { return false; } if (join.HasMember("table")) { const Value& table = join["table"]; if (table.HasMember("column") && table["column"].IsString()) { col1 = table["column"].GetString(); } else { raiseError("Joined query", "Missing join column in table"); return false; } } sql.append(" AND "); sql.append(tag); sql.append(col0); sql.append(" = t"); sql.append(level + 1); sql.append("."); sql.append(col1); sql.append(" "); if (join.HasMember("query") && join["query"].IsObject()) { sql.append(" AND "); const Value& query = join["query"]; processJoinQueryWhereClause(query, sql, asset_codes, level + 1); } } return true; } /** * Find existing payload schema from the DB fledge.service_schema table * * @param service The string containing service name * @param name The string containing schema name * @return resultSet string containing the output of the sql query executed */ bool Connection::findSchemaFromDB(const std::string &service, const std::string 
&schema, std::string &resultSet) { SQLBuffer sql; try { sql.append("select * from fledge.service_schema where service = '"); sql.append(service); sql.append("'"); sql.append(" and name = '"); sql.append(schema); sql.append("';"); const char *query = sql.coalesce(); logSQL("findSchemaFromDB", query); PGresult *res = PQexec(dbConnection, query); delete[] query; if (PQresultStatus(res) == PGRES_TUPLES_OK) { mapResultSet(res, resultSet); PQclear(res); return true; } else { char *SQLState = PQresultErrorField(res, PG_DIAG_SQLSTATE); if (!strcmp(SQLState, "22P02")) // Conversion error { raiseError("findSchemaFromDB", "Unable to convert data to the required type"); } else { raiseError("findSchemaFromDB", PQerrorMessage(dbConnection)); } PQclear(res); return false; } }catch (exception e) { raiseError("findSchemaFromDB", "Internal error: %s", e.what()); } return false; } /** * This function parses the fledge.service_schema table payload retrieved in * and outputs a set of data structures containg the information about the tables * and their columns and indexes * * @param[out] version version retrieved form payload * @param[in] res output containing payload information * @param[out] tableColumnMap map[tablename ---> set of columns] * @param[out] tableIndexMap map[tablename ---> indexes] where each index is a comma separated string of columns * @param[ouy] schemaCreationRequest which is like this is first schema creation request or * schema already exist in the DB * @return true if parsing is successful else false */ bool Connection::parseDatabaseStorageSchema(int &version,const std::string &res, std::unordered_map<std::string, std::unordered_set<columnRec, columnRecHasher, columnRecComparator> > &tableColumnMap, std::unordered_map<std::string, std::vector<std::string> > &tableIndexMap, bool &schemaCreationRequest) { Document document; if (document.Parse(res.c_str()).HasParseError()) { raiseError("parseDatabaseStorageSchema", "%s:%d Failed to parse JSON payload (DB query 
response) %s at %d",__FUNCTION__, __LINE__, GetParseError_En(document.GetParseError()), document.GetErrorOffset()); return false; } if (!document.HasMember("count")) { raiseError("parseDatabaseStorageSchema", "%s:%d count absent from database query response to fledge.service_schema",__FUNCTION__, __LINE__); return false; } int count = document["count"].GetInt(); if ( count == 0) { Logger::getLogger()->debug("%s:%d count = 0, returning from function parseDatabaseStorageSchema", __FUNCTION__, __LINE__); schemaCreationRequest = true; return true; } if (!document.HasMember("rows")) { raiseError("parseDatabaseStorageSchema", "%s:%d rows absent from database query reponse to fledge.service_schema", __FUNCTION__, __LINE__); return false; } else { Value& rows = document["rows"]; if (!rows.IsArray()) { raiseError("parseDatabaseStorageSchema", "%s:%d The property rows in database query reponse to fledge.service_schema must be an array", __FUNCTION__, __LINE__); return false; } else { if (rows.Size() < 1) { raiseError("parseDatabaseStorageSchema", "%s:%d rows array from database query reponse to fledge.service_schema has size 0", __FUNCTION__, __LINE__); return false; } // The above check ensures rows[0] can be accessed Value& firstRow = rows[0]; if (!firstRow.HasMember("version")) { raiseError("parseDatabaseStorageSchema", "%s:%d rows[0] in fledge.service_schema does not have version", __FUNCTION__, __LINE__); return false; } if(!firstRow["version"].IsInt()) { raiseError("parseDatabaseStorageSchema", "%s %d extracting version in rows[0],expecting an int value here", __FUNCTION__, __LINE__); return false; } version = firstRow["version"].GetInt(); if (!firstRow.HasMember("definition")) { raiseError("parseDatabaseStorageSchema", "%s:%d rows[0] in fledge.service_schema does not have definition", __FUNCTION__, __LINE__); return false; } if (!firstRow["definition"].IsString()) { raiseError("parseDatabaseStorageSchema", "%s:%d The property definition in rows[0] in 
fledge.service_schema must be a string", __FUNCTION__, __LINE__); return false; } std::string defStr = firstRow["definition"].GetString(); if (defStr.empty()) { raiseError("parseDatabaseStorageSchema", "%s:%d The rows[0][definition] in fledge.service_schema is empty", __FUNCTION__, __LINE__); return false; } Document docDefStr; if (docDefStr.Parse(defStr.c_str()).HasParseError()) { raiseError("parseDatabaseStorageSchema", "%s:%d Failed to parse JSON starting at definition in database query reponse to fledge.service_schema %s:%d ", __FUNCTION__, __LINE__, GetParseError_En(docDefStr.GetParseError()),docDefStr.GetErrorOffset()); return false; } if (!docDefStr.HasMember("tables")) { raiseError("parseDatabaseStorageSchema", "%s:%d tables section not present in payload obtained from fledge.service_schema ",__FUNCTION__, __LINE__); return false; } Value& tables = docDefStr["tables"]; if (!tables.IsArray()) { raiseError("parseDatabaseStorageSchema", "%s:%d The tables section obtained from payload in fledge.service_schema must be anarray", __FUNCTION__, __LINE__); return false; } // Iterate over the table s list and prepare the data structures for (rapidjson::SizeType i = 0; i < tables.Size(); i++) { if (!tables[i].HasMember("name")) { raiseError("parseDatabaseStorageSchema", "%s:%d The tables[%d] section in payload in fledge.service_schema does not have name field", __FUNCTION__, __LINE__, i); return false; } if (!tables[i]["name"].IsString()) { raiseError("parseDatabaseStorageSchema", "%s:%d The property name in tables[%d] in fledge.service_schema must be a string", __FUNCTION__, __LINE__, i); return false; } std::string name = tables[i]["name"].GetString(); if (!tables[i].HasMember("columns")) { raiseError("parseDatabaseStorageSchema", "%s:%d The tables[%d] section in payload in fledge.service_schema does not have columns field", __FUNCTION__, __LINE__, i); return false; } Value& columns = tables[i]["columns"]; std::unordered_set<columnRec, columnRecHasher, 
columnRecComparator> columnSet; std::vector<std::string> indexesVec; if (!columns.IsArray()) { raiseError("parseDatabaseStorageSchema", "%s:%d The property columns in table %s must be an array", __FUNCTION__, __LINE__, name.c_str()); return false; } Logger::getLogger()->debug("%s:%d Extracting the columns of table name %s", __FUNCTION__, __LINE__, name.c_str()); for (auto& v : columns.GetArray()) { if (v.IsObject()) { if (v.HasMember("column")) { if (!v["column"].IsString()) { Logger::getLogger()->error("%s :%d, table %s,extracting column name, expecting a string value here", __FUNCTION__, __LINE__, name.c_str()); } else { columnRec c; c.column = v["column"].GetString(); if ( c.column.empty()) { raiseError("parseDatabaseStorageSchema", "%s :%d, table %s, column name empty,inconsistent DB", __FUNCTION__, __LINE__, name.c_str()); return false; } if (v.HasMember("type")) { if (!v["type"].IsString()) { Logger::getLogger()->error("%s:%d tablename %s, column = %s, extracting column type, expecting a string value here", __FUNCTION__, __LINE__,name.c_str(), c.column.c_str()); } c.type = v["type"].GetString(); } if (v.HasMember("size")) { if (!v["size"].IsInt()) { Logger::getLogger()->error("%s:%d, tableName = %s, column = %s,extracting column size, expecting an int value here", __FUNCTION__, __LINE__,name.c_str(), c.column.c_str()); } c.sz = v["size"].GetInt(); } if (v.HasMember("key")) { if (!v["key"].IsBool()) { Logger::getLogger()->error("%s:%d, tableName = %s, column = %s,extracting column key, expecting a bool value here", __FUNCTION__, __LINE__, name.c_str(), c.column.c_str()); } else { if (v["key"].GetBool()) { c.key = true; } } } columnSet.insert(c); } } } } Logger::getLogger()->debug("%s:%d Extracting the indexes of tables[%d]", __FUNCTION__, __LINE__, i); if (!tables[i].HasMember("indexes")) { Logger::getLogger()->debug("%s:%d The tables[%d] section in payload in fledge.service_schema does not have indexes field", __FUNCTION__, __LINE__, i); } else { Value& 
indexes = tables[i]["indexes"]; if (!indexes.IsArray()) { raiseError("parseDatabaseStorageSchema", "%s:%d The property indexes under tablename = %s must be an array", __FUNCTION__, __LINE__, name.c_str()); return false; } for (auto& v : indexes.GetArray()) { std::vector<std::string> indexVec; std::string s; if (v.IsObject()) { if (v.HasMember("index")) { if (!v["index"].IsArray()) { raiseError("parseDatabaseStorageSchema", "%s:%d, tableName = %s, extracting index values, expecting an array here", __FUNCTION__, __LINE__, name.c_str()); return false; } else { for (auto& i : v["index"].GetArray()) { if (!i.IsString()) { raiseError("parseDatabaseStorageSchema", "%s:%d, tableName = %s, extracting index ,expecting a string here", __FUNCTION__, __LINE__, name.c_str()); return false; } indexVec.push_back(i.GetString()); } std::sort(indexVec.begin(), indexVec.end()); for ( int i = 0; i < indexVec.size(); ++i) { s.append(indexVec[i]); if ( i < indexVec.size() -1 ) s.append(","); } } } } indexesVec.push_back(s); } } tableColumnMap[name] = columnSet; tableIndexMap[name] = indexesVec; } } } return true; } /** * Create schema of tables * * @param payload The payload containing information about schema of * tables to create * @return true if the tables can be crated successfully */ int Connection::create_schema(const std::string &payload) { Document document; std::string schema; int version; const char *logSection="CreatingSchema"; unsigned long rowsAffectedLastCommand = 0; std::unordered_map<std::string, std::unordered_set<columnRec, columnRecHasher, columnRecComparator> > columnMapFromDB; std::unordered_map<std::string, std::vector<std::string> > indexMapFromDB; bool schemaCreationReq = false; std::vector<sqlQuery> queries; try { if (payload.empty()) { raiseError("create_schema", "%s:%d function's input parameter payload empty", __FUNCTION__, __LINE__); return -1; } else { if (document.Parse(payload.c_str()).HasParseError()) { raiseError("create_schema", "%s:%d Failed to parse 
JSON payload %s:%d", __FUNCTION__, __LINE__, GetParseError_En(document.GetParseError()), document.GetErrorOffset()); return -1; } if (!document.HasMember("schema")) { raiseError("create_schema", "%s:%d schema absent from input parameter JSON payload", __FUNCTION__, __LINE__); return -1; } else { if (!document["schema"].IsString()) { raiseError("create_schema", "%s:%d The property schema in JSON payload must be a string", __FUNCTION__, __LINE__); return -1; } schema = document["schema"].GetString(); if (schema.empty()) { raiseError("create_schema", "%s:%d schema obtained from payload is empty", __FUNCTION__, __LINE__); return -1; } Logger::getLogger()->debug("%s:%d schema obtained from payload = %s", __FUNCTION__, __LINE__, schema.c_str()); if (!document.HasMember("service")) { raiseError("create_schema", "%s:%d service absent from payload for schema %s", __FUNCTION__, __LINE__, schema.c_str()); return -1; } if (!document["service"].IsString()) { raiseError("create_schema", "%s:%d The property service in JSON payload must be a string", __FUNCTION__, __LINE__); return -1; } std::string service = document["service"].GetString(); if (service.empty()) { raiseError("create_schema", "%s:%d empty service name for schema %s", __FUNCTION__, __LINE__, schema.c_str()); return -1; } Logger::getLogger()->debug("%s:%d service obtained from payload = %s", __FUNCTION__, __LINE__, service.c_str()); if (!document.HasMember("version")) { raiseError("create_schema", "%s:%d version absent from payload for schema %s and service %s", __FUNCTION__, __LINE__, schema.c_str(), service.c_str()); return -1; } else { if(!document["version"].IsInt()) { raiseError("create_schema", "%s %d version needs to be int for schema %s and service %s", __FUNCTION__, __LINE__, schema.c_str(), service.c_str()); return -1; } version = document["version"].GetInt(); Logger::getLogger()->debug("%s:%d version obtained from payload = %d", __FUNCTION__, __LINE__, version); std::string results; if 
(findSchemaFromDB(service, schema, results)) { if (!parseDatabaseStorageSchema(version, results, columnMapFromDB, indexMapFromDB, schemaCreationReq)) { raiseError("create_schema", "%s:%d error in parsing Database Storage schema %s for schema and service %s", __FUNCTION__, __LINE__, schema.c_str(), service.c_str()); return -1; } } else { raiseError("create_schema", "%s:%d findSchemaFromDB returned false, error in database query execution for service %s, schema %s", __FUNCTION__, __LINE__, service.c_str(), schema.c_str()); return -1; } std::string queryToCreateSchema = "create schema if not exists " + schema + ";" ; rowsAffectedLastCommand = purgeOperation(queryToCreateSchema.c_str(), logSection, "Create Schema if not exists ", false); if (rowsAffectedLastCommand == -1) { raiseError("create_schema", "%s:%d Error in creating schema %s in database, query executed = %s",__FUNCTION__,__LINE__, schema.c_str(), queryToCreateSchema.c_str()); return -1; } } if (!document.HasMember("tables")) { raiseError("create_schema", "%s:%d tables section absent from payload for schema %s and service %s", __FUNCTION__, __LINE__, schema.c_str(), service.c_str()); return -1; } else { Logger::getLogger()->debug("%s:%d Extracting tables from payload for schema %s and service %s", __FUNCTION__, __LINE__, schema.c_str() , service.c_str()); Value& tables = document["tables"]; if (!tables.IsArray()) { raiseError("create_schema", "%s:%d, Schema %s, Service %s, The property tables must be an array", __FUNCTION__, __LINE__, schema.c_str(), service.c_str()); return -1; } else { std::unordered_set<std::string> unSetTablesInSchemaRequest; std::string sqlDropTables; // Iterate over all the table lists in the Schema Creation/Alter request for (rapidjson::SizeType i = 0; i < tables.Size(); i++) { if (!tables[i].HasMember("name")) { raiseError("create_schema", "%s:%d Schema %s, Service %s : The tables[%d] section in payload does not have name field", __FUNCTION__, __LINE__,schema.c_str(), service.c_str(), 
i); return -1; } if (!tables[i]["name"].IsString()) { raiseError("create_schema", "%s:%d , Schema %s, Service %s, The property name in tables[%d] must be a string", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), i); return -1; } std::string name = tables[i]["name"].GetString(); if (name.empty()) { raiseError("create_schema", "%s:%d Schema %s, Service %s, The property name in tables[%d] is empty", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), i); return -1; } Logger::getLogger()->debug("%s:%d Extracting columns for schema %s, service %s, table name %s ", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); unSetTablesInSchemaRequest.insert(name); if (!tables[i].HasMember("columns")) { raiseError("create_schema", "%s:%d The tables section does not have columns field", __FUNCTION__, __LINE__); return -1; } Value& columns = tables[i]["columns"]; if (!columns.IsArray()) { raiseError("create_schema", "%s:%d The property columns must be an array", __FUNCTION__, __LINE__); return -1; } std::vector<std::string> indexesMatrixFromReq; std::unordered_set<columnRec, columnRecHasher, columnRecComparator> colsPerTableInReq; bool alterTable = false; std::string sql, sqlIdx; // if this is schema creation request or // this table does not exist in db, then create it // else alter the table if (schemaCreationReq || (columnMapFromDB.find(name) == columnMapFromDB.end())) { sql = "create table " + schema + "." + name + " (" ; } else { sql = "alter table " + schema + "." 
+ name + " " ; alterTable = true; } // Iterate over the columns array // For each column, find name, type, size, primary key or not // and store in colsPerTableInReq for (auto& v : columns.GetArray()) { if (v.IsObject()) { columnRec c; if (v.HasMember("column")) { if (!v["column"].IsString()) { raiseError("create_schema", "%s %d Schema: %s, Service: %s ,table name %s , extracting column name, expecting a string value here", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); return -1; } else { c.column = v["column"].GetString(); if (c.column.empty()) { raiseError("create_schema", "%s %d Schema: %s, Service: %s ,table name %s, extracting column, found empty value for column", __FUNCTION__, __LINE__, schema.c_str(), service.c_str() , name.c_str()); return -1; } } } if (v.HasMember("type")) { if (!v["type"].IsString()) { raiseError("create_schema", "%s:%d Schema:%s, Service:%s, tableName : %s , extracting type, expecting a string value here", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); return -1; } else { c.type = v["type"].GetString(); if (c.type == "double") c.type = "real"; if (!checkValidDataType(c.type)) { raiseError("create_schema", "%s:%d Schema:%s, Service:%s, tableName : %s , type %s extracted is not a valid data type", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str(), c.type.c_str()); return -1; } } } if (v.HasMember("size")) { if(!v["size"].IsInt()) { raiseError("create_schema", "%s %d Schema:%s, Service:%s, tableName:%s ,extracting size, expecting an int value here", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); return -1; } else { c.sz = v["size"].GetInt(); } } if (v.HasMember("key")) { if(!v["key"].IsBool()) { raiseError("create_schema", "%s %d Schema:%s, Service:%s, tableName:%s, extracting key, expecting a bool value here", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); return -1; } else { c.key = v["key"].GetBool(); } } 
colsPerTableInReq.insert(c); } } // Iterate over all the indexes per table and store in indexesMatrixFromReq if (!tables[i].HasMember("indexes")) { //Indexes are optional,if absent, will not trigger an exit from function Logger::getLogger()->debug("%s:%d Schema:%s, Service:%s, tableName:%s does not have indexes field", __FUNCTION__, __LINE__ ,schema.c_str(), service.c_str(), name.c_str()); } else { Value& idx = tables[i]["indexes"]; if (!idx.IsArray()) { // make sure if indexes are present, their type in JSON is valid raiseError("create_schema", "%s:%d Schema:%s, Service:%s, tableName:%s The property indexes must be an array", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); return -1; } else { Logger::getLogger()->debug("%s:%d Extracting indexes for Schema:%s, Service:%s, tableName: %s", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); for (auto& v : idx.GetArray()) { std::vector<std::string> indexVec; std::string s; if (v.IsObject()) { if (v.HasMember("index")) { if (!v["index"].IsArray()) { raiseError("create_schema", "%s %d Schema:%s, Service:%s, tableName:%s , extracting index values, expecting an array here", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); return -1; } else { // keep the cols in indexes as a comma separated list of sorted columns for (auto& i : v["index"].GetArray()) { indexVec.push_back(i.GetString()); } std::sort(indexVec.begin(), indexVec.end()); for (auto i = 0; i < indexVec.size(); ++i) { s.append(indexVec[i]); if (i < indexVec.size() -1){ s.append(","); } } } } } indexesMatrixFromReq.push_back(s); } } } // Traverse through the colums list found in DB for this table // and create/alter/delete the colums list // unordered_set<columnRec, columnRecHasher, columnRecComparator> *dbCol = nullptr; if (columnMapFromDB.find(name) != columnMapFromDB.end()) { dbCol = &columnMapFromDB[name]; Logger::getLogger()->debug("%s:%d Schema:%s, Service:%s, tableName: %s found in Database 
", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); } else { Logger::getLogger()->debug("%s:%d Schema:%s, Service:%s, tableName: %s could not be found in Database ", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str()); } bool columnsToAlter = false; for ( auto& v: colsPerTableInReq) { // table creation case if (!alterTable) { sql += v.column + " " + v.type; if (v.type == "varchar") { sql += "(" + std::to_string(v.sz) + ")"; } if (v.key == true) { sql += " primary key"; } sql +=","; } else { // alter table case, table already exists // check if column already exists in database // if not then add if not a key column if (dbCol != nullptr && (dbCol->find(v) == dbCol->end())) { // if it is not a key then add the column else log error if (!v.key) { sql += "add column "; sql += v.column + " " + v.type; if (v.type == "varchar") { sql += "(" + std::to_string(v.sz) + ")"; } sql +=","; columnsToAlter = true; } else { // altering a key is not allowed // column in req does not exist in DB // but is key, not allowed raiseError("create_schema", "%s:%d Schema:%s, Service:%s, tableName:%s, altering key request(%s) is not allowed for an existing table, dropping the schema request", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str(), v.column.c_str()); return -1; } } else { // altering an existing column not alllowed // This condition means , column in req already present in DB if (dbCol != nullptr) { auto itr = dbCol->find(v); //Check if the column matches exactly with that present in db , if not same , the reject the request // We ignore size for integer columns if (v.type.compare("integer") == 0) { if (itr->type != v.type || itr->key != v.key) { raiseError("create_schema", "%s:%d Schema:%s, Service:%s, tableName:%s, altering an existing column %s is not allowed", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str(), v.column.c_str() ); return -1; } } else if (itr->type != v.type || itr->sz != v.sz || 
itr->key != v.key) { raiseError("create_schema", "%s:%d Schema:%s, Service:%s, tableName:%s, altering an existing column %s is not allowed", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str(), v.column.c_str() ); return -1; } } } } } // If altering the table, drop all the columns which are present // in DB but are not in the request, iterate over DB colums list // to find out columns which are ppresent in DB, compare with the // incoming request list of columns colsPerTableInReq if(alterTable && dbCol) { for ( auto col : *dbCol) { // Make sure the column to be dropped is not a primary key if(colsPerTableInReq.find(col) == colsPerTableInReq.end()) { if (!col.key) { // this column is in database but not in latest schema request // need to drop this column sql += "drop column " + col.column + "," ; columnsToAlter = true; } else { raiseError("create_schema", "%s:%d Schema:%s, Service:%s, tableName:%s, dropping th ekey column is not allowed", __FUNCTION__, __LINE__, schema.c_str(), service.c_str(), name.c_str(), col.column.c_str()); return -1; } } } } // remove last comma if ( sql[sql.size() - 1] == ',') { sql.erase(sql.size()-1); } if (alterTable) sql += " ;"; else sql += " );"; // execute the sql here // if alterTable is true and no columns to Alter , then dont fire the sqlquery // if (!(alterTable && !columnsToAlter)) { sqlQuery q; q.query = sql.c_str(); q.purgeOpArg = "CreatingSchema - phase 1, creating/altering tables"; char msg[MSG_LEN] = {'\0'}; snprintf(msg, MSG_LEN, "Function: %s, Schema:%s, Service:%s, tableName:%s, Error in creating/altering tables, command executed = %s",__FUNCTION__, schema.c_str(), service.c_str(), name.c_str(), sql.c_str()); q.logMsg = msg; queries.push_back(q); } std::vector<std::string> &indexMatrixFromDB = indexMapFromDB[name]; bool indexPresent = false; // create the indexes in req and not in DB // iterate over the index creation request and search from // them in DB, if does not exist create it for (auto &req : 
indexesMatrixFromReq) { indexPresent = false; for ( auto &row : indexMatrixFromDB) { if (req == row) indexPresent = true; } if(!indexPresent) { sqlIdx = "create index " + name + "_" + getIndexName(req) + " on " + schema + "." + name + "("; sqlIdx += req; sqlIdx += " );"; sqlQuery q; q.query = sqlIdx.c_str(); q.purgeOpArg = "CreatingSchema - phase 2, creating index on tables"; char msg[MSG_LEN] = {'\0'}; snprintf(msg, MSG_LEN, "Function :%s, Schema:%s, Service:%s, tableName:%s Error in creating indexes command %s",__FUNCTION__, schema.c_str(), service.c_str(), name.c_str(), sqlIdx.c_str()); q.logMsg = msg; queries.push_back(q); } } // delete the indexes in DB and not in req // iterate over the indexes list present in DB and compare with the // indexes in teh schema creation request,if not found, then delete them // for (auto &req : indexMatrixFromDB) { indexPresent = false; for ( auto &row : indexesMatrixFromReq) { if (req == row) indexPresent = true; } if(!indexPresent) { sqlIdx = "drop index " + schema + "." 
+ name + "_" + req + ";"; sqlQuery q; q.query = sqlIdx; q.purgeOpArg = "CreatingSchema - phase 2, dropping index on tables"; char msg[MSG_LEN] = {'\0'}; snprintf(msg, MSG_LEN, "Function: %s, Schema:%s, Service:%s, tableName:%s, Error in executing drop index command %s",__FUNCTION__, schema.c_str(), service.c_str(), name.c_str(), sqlIdx.c_str()); q.logMsg = msg; queries.push_back(q); } } } // Iterate over all the sqlQuery command and execute them for (sqlQuery& q : queries) { if(!q.query.empty()) { rowsAffectedLastCommand = purgeOperation(q.query.c_str(), logSection, q.purgeOpArg.c_str(), false); if (rowsAffectedLastCommand == -1) { raiseError("create_schema", q.logMsg.c_str()); return -1; } } } // // delete all the tables which are not in the new schema request // but present in db sqlDropTables += "drop table if exists "; bool tableToDrop = false; for (auto itr : columnMapFromDB) { if (unSetTablesInSchemaRequest.find(itr.first) == unSetTablesInSchemaRequest.end()) { sqlDropTables += schema +"." 
+ itr.first + ","; tableToDrop = true; } } if (sqlDropTables[sqlDropTables.size() -1 ] == ',') { sqlDropTables.erase(sqlDropTables.size() -1); } sqlDropTables += ";"; if (tableToDrop) { rowsAffectedLastCommand = purgeOperation(sqlDropTables.c_str(), logSection, "Dropping unrequired tables", false); if (rowsAffectedLastCommand == -1) { raiseError("create_schema", "%s:%d Error in executing drop table command %s",__FUNCTION__,__LINE__, sqlDropTables.c_str()); return -1; } } // delete payload in fledge.service_schema if already present if(schemaCreationReq == false) { std::string s = "delete from fledge.service_schema where name = '" + schema + "' and service = '" + service + "';"; rowsAffectedLastCommand = purgeOperation(s.c_str(), logSection, "delete from fledge.service_schema ", false); if (rowsAffectedLastCommand == -1) { raiseError("create_schema", "%s:%d Error in executing delete payload from service_schema command =%s",__FUNCTION__, __LINE__, s.c_str()); return -1; } } // insert payload in the fledge.service_schema std::string s = "insert into fledge.service_schema(name, service, version, definition) values ('" + schema + "', " +"'" + service + "', " + to_string(version) + ", " + "'" + payload + "') ;" ; rowsAffectedLastCommand = purgeOperation(s.c_str(), logSection, "insert in fledge.service_schema ", false); if (rowsAffectedLastCommand == -1) { raiseError("create_schema", "%s:%d Error in executing insert payload into service_schema, command =%s ",__FUNCTION__, __LINE__, s.c_str()); return -1; } } } } } } catch( std::exception &e){ raiseError("create_schema", "%s %d exception caught %s", __FUNCTION__, __LINE__, e.what() ); return -1; } return 1; } /** * This function checks for input string for ',' and returns a string with ',' replaced with '_' * * @param[in] string to check for ',' * @return string with , replaced with _ */ std::string Connection::getIndexName(std::string s){ std::replace_if( s.begin(),s.end(), [](char ch) {return ch ==',';},'_'); return s; } 
/** * This function checks whether the passed string represent a valid postgres column data type * * @param[in] string to check for ',' * @return true if it is a valid data type , false otherwise */ bool Connection::checkValidDataType(const std::string &s){ return ( s == "varchar" || s == "integer" || s == "double" || s == "real" || s == "sequence"); } /** * Purge readings by asset or purge all readings * * @param asset The asset name to purge * If empty all assets will be removed * @return The number of removed asset records */ unsigned int Connection::purgeReadingsAsset(const string& asset) { SQLBuffer sql; unsigned int rowsAffected; sql.append("DELETE FROM fledge.readings"); if (!asset.empty()) { sql.append(" WHERE asset_code = '" + asset + "'"); } sql.append(';'); const char *query = sql.coalesce(); logSQL("PurgeReadingsAsset", query); START_TIME; PGresult *res = PQexec(dbConnection, query); END_TIME; delete[] query; if (PQresultStatus(res) == PGRES_COMMAND_OK) { PQclear(res); return atoi(PQcmdTuples(res)); } raiseError("PurgeReadingsAsset", PQerrorMessage(dbConnection)); PQclear(res); return 0; } ================================================ FILE: C/plugins/storage/postgres/connection_manager.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <connection_manager.h> #include <connection.h> #include <logger.h> #include <stdexcept> ConnectionManager *ConnectionManager::instance = 0; /** * Default constructor for the connection manager. */ ConnectionManager::ConnectionManager() { lastError.message = NULL; lastError.entryPoint = NULL; if (getenv("FLEDGE_TRACE_SQL")) m_logSQL = true; else m_logSQL = false; } /** * Called at shutdown. Shrink the idle pool, this will * have the side effect of closing the connections to the database. 
*/ void ConnectionManager::shutdown() { shrinkPool(idle.size()); } /** * Return the singleton instance of the connection manager. * if none was created then create it. */ ConnectionManager *ConnectionManager::getInstance() { if (instance == 0) { instance = new ConnectionManager(); } return instance; } /** * Grow the connection pool by the number of connections * specified. * * @param delta The number of connections to add to the pool */ void ConnectionManager::growPool(unsigned int delta) { while (delta-- > 0) { try { Connection *conn = new Connection(); conn->setTrace(m_logSQL); conn->setMaxReadingRows(m_maxReadingRows); idleLock.lock(); idle.push_back(conn); idleLock.unlock(); } catch (std::exception& e) { Logger::getLogger()->error("Failed to create storage connection: %s", e.what()); } } } /** * Attempt to shrink the number of connections in the idle pool * * @param delta Number of connections to attempt to remove * @return The number of connections removed. */ unsigned int ConnectionManager::shrinkPool(unsigned int delta) { unsigned int removed = 0; Connection *conn; while (delta-- > 0) { idleLock.lock(); conn = idle.back(); idle.pop_back(); idleLock.unlock(); if (conn) { delete conn; removed++; } else { break; } } return removed; } /** * Allocate a connection from the idle pool. If * no connection is available add a new connection */ Connection *ConnectionManager::allocate() { Connection *conn = 0; idleLock.lock(); if (idle.empty()) { conn = new Connection(); conn->setTrace(m_logSQL); conn->setMaxReadingRows(m_maxReadingRows); } else { conn = idle.front(); idle.pop_front(); } idleLock.unlock(); if (conn) { inUseLock.lock(); inUse.push_front(conn); inUseLock.unlock(); } return conn; } /** * Release a connection back to the idle pool for * reallocation. * * @param conn The connection to release. 
*/
void ConnectionManager::release(Connection *conn)
{
	// Move the connection from the in-use list back to the idle pool
	inUseLock.lock();
	inUse.remove(conn);
	inUseLock.unlock();
	idleLock.lock();
	idle.push_back(conn);
	idleLock.unlock();
}

/**
 * Set the last error information for a plugin.
 *
 * @param source	The source of the error
 * @param description	The error description
 * @param retryable	Flag to determine if the error condition is transient
 */
void ConnectionManager::setError(const char *source, const char *description, bool retryable)
{
	errorLock.lock();
	// Free any previously stored error strings before replacing them
	if (lastError.entryPoint)
		free(lastError.entryPoint);
	if (lastError.message)
		free(lastError.message);
	lastError.retryable = retryable;
	lastError.entryPoint = strdup(source);
	lastError.message = strdup(description);
	errorLock.unlock();
}


================================================
FILE: C/plugins/storage/postgres/include/connection.h
================================================
#ifndef _CONNECTION_H
#define _CONNECTION_H
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <sql_buffer.h>
#include <string>
#include <rapidjson/document.h>
#include <libpq-fe.h>
#include <unordered_map>
#include <unordered_set>
#include <functional>
#include <vector>

// Flags passed to the reading purge entry points
#define STORAGE_PURGE_RETAIN_ANY 0x0001U
#define STORAGE_PURGE_RETAIN_ALL 0x0002U
#define STORAGE_PURGE_SIZE	 0x0004U

/**
 * Maximum number of readings to insert in a single
 * insert statement
 */
#define INSERT_ROW_LIMIT	5000

/**
 * Connection to the PostgreSQL database; wraps a libpq PGconn and
 * implements the storage plugin operations against it
 */
class Connection {
	public:
		Connection();
		~Connection();
		// Generic table access
		bool		retrieve(const std::string& schema, const std::string& table,
					const std::string& condition, std::string& resultSet);
		bool		retrieveReadings(const std::string& condition, std::string& resultSet);
		int		insert(const std::string& table, const std::string& data);
		int		update(const std::string& table, const std::string& data);
		int		deleteRows(const std::string& table, const std::string& condition);
		// Readings buffer access
		int		appendReadings(const char *readings);
		bool		fetchReadings(unsigned
long id, unsigned int blksize, std::string& resultSet);
		// Purge operations
		unsigned int	purgeReadings(unsigned long age, unsigned int flags,
					unsigned long sent, std::string& results);
		unsigned int	purgeReadingsByRows(unsigned long rowcount, unsigned int flags,
					unsigned long sent, std::string& results);
		unsigned long	purgeOperation(const char *sql, const char *logSection,
					const char *phase, bool retrieve);
		long		tableSize(const std::string& table);
		void		setTrace(bool flag) { m_logSQL = flag; };
		static bool	formatDate(char *formatted_date, size_t formatted_date_size, const char *date);
		// Table snapshot support
		int		create_table_snapshot(const std::string& table, const std::string& id);
		int		load_table_snapshot(const std::string& table, const std::string& id);
		int		delete_table_snapshot(const std::string& table, const std::string& id);
		bool		get_table_snapshots(const std::string& table, std::string& resultSet);
		bool		aggregateQuery(const rapidjson::Value& payload, std::string& resultSet);
		// Service storage schema support
		int		create_schema(const std::string &payload);
		bool		findSchemaFromDB(const std::string &service, const std::string &name,
					std::string &resultSet);
		unsigned int	purgeReadingsAsset(const std::string& asset);
		void		setMaxReadingRows(long rows) { m_maxReadingRows = rows; }
	private:
		bool		m_logSQL;
		void		raiseError(const char *operation, const char *reason, ...);
		PGconn		*dbConnection;
		void		mapResultSet(PGresult *res, std::string& resultSet);
		// JSON payload to SQL translation helpers
		bool		jsonModifiers(const rapidjson::Value&, SQLBuffer&);
		bool		jsonAggregates(const rapidjson::Value&, const rapidjson::Value&,
					SQLBuffer&, SQLBuffer&, bool isTableReading = false);
		bool		jsonWhereClause(const rapidjson::Value& whereClause, SQLBuffer&,
					std::vector<std::string> &asset_codes,
					bool convertLocaltime = false, std::string prefix = "");
		bool		returnJson(const rapidjson::Value&, SQLBuffer&, SQLBuffer&);
		char		*trim(char *str);
		const std::string escape_double_quotes(const std::string&);
		const std::string escape(const std::string&);
		const std::string double_quote_reserved_column_name(const std::string &column_name);
		void		logSQL(const char *, const char *);
		bool		isFunction(const char *) const;
		bool		selectColumns(const rapidjson::Value& document, SQLBuffer& sql, int level);
		bool		appendTables(const std::string &schema, const rapidjson::Value& document,
					SQLBuffer& sql, int level);
		bool		processJoinQueryWhereClause(const rapidjson::Value& query, SQLBuffer& sql,
					std::vector<std::string> &asset_codes, int level);
		std::string	getIndexName(std::string s);
		bool		checkValidDataType(const std::string &s);
		long		m_maxReadingRows;
		// Description of a single column in a service storage schema
		typedef struct {
			std::string	column;
			std::string	type;
			int		sz;
			bool		key = false;
		} columnRec;
		// Custom Hash Functor that will compute the hash on the
		// passed objects column data member
		struct columnRecHasher
		{
			size_t operator()(const columnRec & obj) const
			{
				return std::hash<std::string>()(obj.column);
			}
		};
		// Equality on column name only, matching the hash above
		struct columnRecComparator
		{
			bool operator()(const columnRec & obj1, const columnRec & obj2) const
			{
				if (obj1.column == obj2.column)
					return true;
				return false;
			}
		};
		// A SQL statement plus the logging context used when executing it
		typedef struct {
			std::string	query;
			std::string	purgeOpArg;
			std::string	logMsg;
		} sqlQuery;
	public:
		bool		parseDatabaseStorageSchema(int &version, const std::string &res,
					std::unordered_map<std::string, std::unordered_set<columnRec, columnRecHasher, columnRecComparator> > &tableColumnMap,
					std::unordered_map<std::string, std::vector<std::string> > &tableIndexMap,
					bool &schemaCreationRequest);
};
#endif


================================================
FILE: C/plugins/storage/postgres/include/connection_manager.h
================================================
#ifndef _CONNECTION_MANAGER_H
#define _CONNECTION_MANAGER_H
/*
 * Fledge storage service.
* * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <plugin_api.h> #include <list> #include <mutex> class Connection; /** * Singleton class to manage Postgres connection pool */ class ConnectionManager { public: static ConnectionManager *getInstance(); void growPool(unsigned int); unsigned int shrinkPool(unsigned int); Connection *allocate(); void release(Connection *); void shutdown(); void setError(const char *, const char *, bool); PLUGIN_ERROR *getError() { return &lastError; } void setMaxReadingRows(long rows) { m_maxReadingRows = rows; } private: ConnectionManager(); static ConnectionManager *instance; std::list<Connection *> idle; std::list<Connection *> inUse; std::mutex idleLock; std::mutex inUseLock; std::mutex errorLock; PLUGIN_ERROR lastError; bool m_logSQL; long m_maxReadingRows; }; #endif ================================================ FILE: C/plugins/storage/postgres/plugin.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2017-2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <connection_manager.h> #include <connection.h> #include <plugin_api.h> #include <stdio.h> #include <stdlib.h> #include <strings.h> #include "libpq-fe.h" #include "rapidjson/document.h" #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include <sstream> #include <iostream> #include <string> #include <logger.h> #include <plugin_exception.h> #include <config_category.h> using namespace std; using namespace rapidjson; #define DEFAULT_SCHEMA "fledge" #define OR_DEFAULT_SCHEMA(x) ((x) ? 
(x) : DEFAULT_SCHEMA) /** * The Postgres plugin interface */ extern "C" { const char *default_config = QUOTE({ "poolSize" : { "description" : "Connection pool size", "type" : "integer", "default" : "5", "displayName" : "Pool Size", "order" : "1" }, "maxReadingRows" : { "description" : "The maximum number of readings to insert in a single statement", "type" : "integer", "default" : "5000", "displayName" : "Max. Insert Rows", "order" : "2" } }); /** * The plugin information structure */ static PLUGIN_INFORMATION info = { "PostgresSQL", // Name "1.2.0", // Version SP_COMMON|SP_READINGS, // Flags PLUGIN_TYPE_STORAGE, // Type "1.6.0", // Interface version default_config }; /** * Return the information about this plugin */ PLUGIN_INFORMATION *plugin_info() { return &info; } /** * Initialise the plugin, called to get the plugin handle * In the case of Postgres we also get a pool of connections * to use. */ PLUGIN_HANDLE plugin_init(ConfigCategory *category) { ConnectionManager *manager = ConnectionManager::getInstance(); long poolSize = 5, maxReadingRows = 5000; if (category->itemExists("poolSize")) { poolSize = strtol(category->getValue("poolSize").c_str(), NULL, 10); } if (category->itemExists("maxReadingRows")) { long val = strtol(category->getValue("maxReadingRows").c_str(), NULL, 10); if (val > 0) maxReadingRows = val; } manager->setMaxReadingRows(maxReadingRows); manager->growPool(poolSize); return manager; } /** * Insert into an arbitrary table */ int plugin_common_insert(PLUGIN_HANDLE handle, char *schema, char *table, char *data) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return 0; } int result = connection->insert(std::string(OR_DEFAULT_SCHEMA(schema)) + "." 
+ std::string(table), std::string(data)); manager->release(connection); return result; } /** * Retrieve data from an arbitrary table */ const char *plugin_common_retrieve(PLUGIN_HANDLE handle, char *schema, char *table, char *query) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return NULL; } bool rval = connection->retrieve(schema, std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(query), results); manager->release(connection); if (rval) { return strdup(results.c_str()); } return NULL; } /** * Update an arbitary table */ int plugin_common_update(PLUGIN_HANDLE handle, char *schema, char *table, char *data) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return 0; } int result = connection->update(std::string(OR_DEFAULT_SCHEMA(schema)) + "." + std::string(table), std::string(data)); manager->release(connection); return result; } /** * Delete from an arbitrary table */ int plugin_common_delete(PLUGIN_HANDLE handle, char *schema , char *table, char *condition) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return 0; } int result = connection->deleteRows(std::string(OR_DEFAULT_SCHEMA(schema)) + "." 
+ std::string(table), std::string(condition));
	manager->release(connection);
	return result;
}

/**
 * Append a sequence of readings to the readings buffer
 */
int plugin_reading_append(PLUGIN_HANDLE handle, char *readings)
{
ConnectionManager *manager = (ConnectionManager *)handle;
Connection *connection = manager->allocate();

	if (connection == NULL)
	{
		Logger::getLogger()->fatal("No database connections available");
		return 0;
	}
	int result = connection->appendReadings(readings);
	manager->release(connection);
	return result;
}

/**
 * Fetch a block of readings from the readings buffer
 */
char *plugin_reading_fetch(PLUGIN_HANDLE handle, unsigned long id, unsigned int blksize)
{
ConnectionManager *manager = (ConnectionManager *)handle;
Connection *connection = manager->allocate();
std::string resultSet;

	if (connection == NULL)
	{
		Logger::getLogger()->fatal("No database connections available");
		return NULL;
	}
	// NOTE(review): the boolean result of fetchReadings() is ignored;
	// on failure the (possibly empty) resultSet is still returned
	connection->fetchReadings(id, blksize, resultSet);
	manager->release(connection);
	return strdup(resultSet.c_str());
}

/**
 * Retrieve some readings from the readings buffer
 */
char *plugin_reading_retrieve(PLUGIN_HANDLE handle, char *condition)
{
ConnectionManager *manager = (ConnectionManager *)handle;
Connection *connection = manager->allocate();
std::string results;

	if (connection == NULL)
	{
		Logger::getLogger()->fatal("No database connections available");
		return NULL;
	}
	connection->retrieveReadings(std::string(condition), results);
	manager->release(connection);
	return strdup(results.c_str());
}

/**
 * Purge readings from the buffer
 *
 * When STORAGE_PURGE_SIZE is set param is a row count,
 * otherwise it is the age of the readings to purge
 */
char *plugin_reading_purge(PLUGIN_HANDLE handle, unsigned long param, unsigned int flags, unsigned long sent)
{
ConnectionManager *manager = (ConnectionManager *)handle;
Connection *connection = manager->allocate();
std::string results;
unsigned long age, size;	// NOTE(review): 'size' is never used

	if (connection == NULL)
	{
		Logger::getLogger()->fatal("No database connections available");
		return NULL;
	}
	if (flags & STORAGE_PURGE_SIZE)
	{
		(void)connection->purgeReadingsByRows(param, flags, sent, results);
	}
	else
	{
		age = param;
		(void)connection->purgeReadings(age, flags, sent, results);
	}
	manager->release(connection);
	return strdup(results.c_str());
}

/**
 * Release a previously returned result set
 */
void plugin_release(PLUGIN_HANDLE handle, char *results)
{
	(void)handle;
	free(results);
}

/**
 * Return details on the last error that occurred.
 */
PLUGIN_ERROR *plugin_last_error(PLUGIN_HANDLE handle)
{
ConnectionManager *manager = (ConnectionManager *)handle;

	return manager->getError();
}

/**
 * Shutdown the plugin
 */
bool plugin_shutdown(PLUGIN_HANDLE handle)
{
ConnectionManager *manager = (ConnectionManager *)handle;

	manager->shutdown();
	return true;
}

/**
 * Create snapshot of a common table
 *
 * @param handle	The plugin handle
 * @param table		The table to snapshot
 * @param id		The snapshot id
 * @return		-1 on error, >= 0 on success
 *
 * The new created table has the following name:
 * table_id
 */
int plugin_create_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id)
{
ConnectionManager *manager = (ConnectionManager *)handle;
Connection *connection = manager->allocate();

	if (connection == NULL)
	{
		Logger::getLogger()->fatal("No database connections available");
		return -1;
	}
	int result = connection->create_table_snapshot(std::string(table), std::string(id));
	manager->release(connection);
	return result;
}

/**
 * Load a snapshot of a common table
 *
 * @param handle	The plugin handle
 * @param table		The table to fill from a given snapshot
 * @param id		The table snapshot id
 * @return		-1 on error, >= 0 on success
 */
int plugin_load_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id)
{
ConnectionManager *manager = (ConnectionManager *)handle;
Connection *connection = manager->allocate();

	if (connection == NULL)
	{
		Logger::getLogger()->fatal("No database connections available");
		return -1;
	}
	int result = connection->load_table_snapshot(std::string(table), std::string(id));
	manager->release(connection);
return result; } /** * Delete a snapshot of a common table * * @param handle The plugin handle * @param table The table which shapshot will be removed * @param id The snapshot id * @return -1 on error, >= o on success * */ int plugin_delete_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return -1; } int result = connection->delete_table_snapshot(std::string(table), std::string(id)); manager->release(connection); return result; } /** * Get all snapshots of a given common table * * @param handle The plugin handle * @param table The table name * @return List of snapshots (even empty list) or NULL for errors * */ const char* plugin_get_table_snapshots(PLUGIN_HANDLE handle, char *table) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return NULL; } bool rval = connection->get_table_snapshots(std::string(table), results); manager->release(connection); return rval ? 
strdup(results.c_str()) : NULL; } /** * Create schema of a common table * * @param handle The plugin handle * @param payload The payload to shapshot * @return -1 on error, >= o on success * */ int plugin_createSchema(PLUGIN_HANDLE handle, char *payload) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return -1; } int result = connection->create_schema(std::string(payload)); manager->release(connection); return result; } int plugin_schema_update(PLUGIN_HANDLE handle, char *schema, char *payload) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return 0; } // create_schema handles both create and update schema // schema value gets parsed from the payload int result = connection->create_schema(std::string(payload)); manager->release(connection); return result; } /** * Purge given readings asset or all readings from the buffer */ unsigned int plugin_reading_purge_asset(PLUGIN_HANDLE handle, char *asset) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); if (connection == NULL) { Logger::getLogger()->fatal("No database connections available"); return 0; } unsigned int deleted = connection->purgeReadingsAsset(asset); manager->release(connection); return deleted; } }; ================================================ FILE: C/plugins/storage/sqlite/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(sqlite) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(STORAGE_COMMON_LIB storage-common-lib) # Path of compiled sqlite3 file: /usr/local/bin set(FLEDGE_SQLITE3_LIBS "/usr/local/bin" CACHE INTERNAL "") # Find source files 
file(GLOB SOURCES ./common/*.cpp ./schema/*.cpp *.cpp)

# Include header files
include_directories(./include)
include_directories(./common/include)
include_directories(./schema/include)
include_directories(../../../common/include)
include_directories(../../../services/common/include)
include_directories(../common/include)
include_directories(../../../thirdparty/rapidjson/include)

link_directories(${PROJECT_BINARY_DIR}/../../../lib)

# Create shared library
add_library(${PROJECT_NAME} SHARED ${SOURCES})
target_link_libraries(${PROJECT_NAME} ${STORAGE_COMMON_LIB})
set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1)

# Check Sqlite3 required version
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}")
find_package(sqlite3)

# Use static SQLite3 library
if(EXISTS ${FLEDGE_SQLITE3_LIBS})
	include_directories(${FLEDGE_SQLITE3_LIBS})
	target_link_libraries(${PROJECT_NAME} -L"${FLEDGE_SQLITE3_LIBS}/.libs" -lsqlite3)
else()
	target_link_libraries(${PROJECT_NAME} -lsqlite3)
endif()

# Install SQLite3 command line with static library
if(EXISTS ${FLEDGE_SQLITE3_LIBS})
	install(PROGRAMS ${FLEDGE_SQLITE3_LIBS}/sqlite3 DESTINATION "fledge/plugins/storage/${PROJECT_NAME}")
endif()

# Install library
install(TARGETS ${PROJECT_NAME} DESTINATION fledge/plugins/storage/${PROJECT_NAME})

# Install init.sql
install(FILES ${CMAKE_SOURCE_DIR}/scripts/plugins/storage/${PROJECT_NAME}/init.sql DESTINATION fledge/plugins/storage/${PROJECT_NAME})
install(FILES ${CMAKE_SOURCE_DIR}/scripts/plugins/storage/${PROJECT_NAME}/init_readings.sql DESTINATION fledge/plugins/storage/${PROJECT_NAME})


================================================
FILE: C/plugins/storage/sqlite/Findsqlite3.cmake
================================================
# This CMake file locates the SQLite3 development libraries
#
# The following variables are set:
# SQLITE_FOUND       - If the SQLite library was found
# SQLITE_LIBRARIES   - Path to the static library
# SQLITE_INCLUDE_DIR - Path to SQLite headers
# SQLITE_VERSION     - Library version

set(SQLITE_MIN_VERSION "3.11.0")

# Check whether path of compiled libsqlite3.a and .h files exists
if (EXISTS ${FLEDGE_SQLITE3_LIBS})
	find_path(SQLITE_INCLUDE_DIR sqlite3.h PATHS ${FLEDGE_SQLITE3_LIBS})
	find_library(SQLITE_LIBRARIES NAMES libsqlite3.a PATHS "${FLEDGE_SQLITE3_LIBS}/.libs")
else()
	find_path(SQLITE_INCLUDE_DIR sqlite3.h)
	find_library(SQLITE_LIBRARIES NAMES libsqlite3.so)
endif()

if (SQLITE_INCLUDE_DIR AND SQLITE_LIBRARIES)
	# Extract the version string from the sqlite3.h header
	execute_process(COMMAND grep ".*#define.*SQLITE_VERSION " ${SQLITE_INCLUDE_DIR}/sqlite3.h
			COMMAND sed "s/.*\"\\(.*\\)\".*/\\1/"
			OUTPUT_VARIABLE SQLITE_VERSION
			OUTPUT_STRIP_TRAILING_WHITESPACE)
	if ("${SQLITE_VERSION}" VERSION_LESS "${SQLITE_MIN_VERSION}")
		message(FATAL_ERROR "SQLite3 version >= ${SQLITE_MIN_VERSION} required, found version ${SQLITE_VERSION}")
	else()
		message(STATUS "Found SQLite version ${SQLITE_VERSION}: ${SQLITE_LIBRARIES}")
		set(SQLITE_FOUND TRUE)
	endif()
else()
	message(FATAL_ERROR "Could not find SQLite")
endif()


================================================
FILE: C/plugins/storage/sqlite/common/connection.cpp
================================================
/*
 * Fledge storage service.
 *
 * Copyright (c) 2018 OSIsoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <sqlite_common.h>
#include <connection.h>
#include <connection_manager.h>
#include <utils.h>
#include <unistd.h>
#include "readings_catalogue.h"

/*
 * Control the way purge deletes readings. The block size sets a limit as to how many rows
 * get deleted in each call, whilst the sleep interval controls how long the thread sleeps
 * between deletes. The idea is to not keep the database locked too long and allow other threads
 * to have access to the database between blocks.
*/
#define PURGE_SLEEP_MS 500
#define PURGE_SLOWDOWN_AFTER_BLOCKS 5
#define PURGE_SLOWDOWN_SLEEP_MS 500
#define LOG_AFTER_NERRORS (MAX_RETRIES / 2)

/**
 * SQLite3 storage plugin for Fledge
 */

using namespace std;
using namespace rapidjson;

#define CONNECT_ERROR_THRESHOLD		5*60	// 5 minutes

/*
 * The following allows for conditional inclusion of code that tracks the top queries
 * run by the storage plugin and the number of times a particular statement has to
 * be retried because of the database being busy./
 */
#define DO_PROFILE		0
#define DO_PROFILE_RETRIES	0
#if DO_PROFILE
#include <profile.h>

#define	TOP_N_STATEMENTS		10	// Number of statements to report in top n
#define RETRY_REPORT_THRESHOLD		1000	// Report retry statistics every X calls

QueryProfile profiler(TOP_N_STATEMENTS);
unsigned long retryStats[MAX_RETRIES] = { 0,0,0,0,0,0,0,0,0,0 };
unsigned long numStatements = 0;
int	      maxQueue = 0;
#endif

// Cross-connection coordination of write access to the database
static std::atomic<int> m_waiting(0);
static std::atomic<int> m_writeAccessOngoing(0);
static std::mutex	db_mutex;
static std::condition_variable	db_cv;
static int purgeBlockSize = PURGE_DELETE_BLOCK_SIZE;

// Timing helpers used around database calls
#define START_TIME std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
#define END_TIME std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); \
				 auto usecs = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();

static time_t connectErrorTime = 0;

/**
 * This SQLIte3 query callback returns a formatted date
 * by SELECT strftime('format', column, 'localtime')
 *
 * @param data		Output parameter to update with new datetime
 * @param nCols		The number of columns or the row
 * @param colValues	The column values
 * @param colNames	The column names
 * @return		0 on success, 1 otherwise
 */
int dateCallback(void *data,
		 int nCols,
		 char **colValues,
		 char **colNames)
{
	if (colValues[0] != NULL)
	{
		// NOTE(review): no NUL terminator is copied; callers pass a
		// zero-initialised buffer, which provides the termination
		memcpy((char *)data,
			colValues[0],
			strlen(colValues[0]));
		// OK
		return 0;
	}
	else
	{
		// Failure
		return 1;
	}
}

/**
 * Retrieves the current datetime (now ()) from SQlite
 *
 * @param Now	Output parameter - now ()
 * @return	True, operations succeeded
 *
 */
bool Connection::getNow(string& Now)
{
bool retCode;
char* zErrMsg = NULL;
char nowDate[100] = "";

	string nowSqlCMD = "SELECT " SQLITE3_NOW_READING;

	int rc = SQLexec(dbHandle,
			 "now",
			 nowSqlCMD.c_str(),
			 dateCallback,
			 nowDate,
			 &zErrMsg);
	if (rc == SQLITE_OK )
	{
		Now = nowDate;
		retCode = true;
	}
	else
	{
		// NOTE(review): this format string has one %s conversion but is
		// passed two arguments (the SQL text and zErrMsg) — confirm intent
		Logger::getLogger()->error("SELECT NOW() error :%s:", nowSqlCMD.c_str(), zErrMsg);
		sqlite3_free(zErrMsg);
		Now = "";
		retCode = false;
	}

	return retCode;
}

//### #########################################################################################:

/**
 * Apply Fledge default datetime formatting
 * to a detected DATETIME datatype column
 *
 * @param pStmt		Current SQLite3 result set
 * @param i		Current column index
 * @param newDate	Output parameter for new date
 * @return		True if format has been applied,
 *			False otherwise
 */
bool Connection::applyColumnDateTimeFormat(sqlite3_stmt *pStmt,
					   int i,
					   string& newDate)
{
	bool apply_format = false;
	string formatStmt = {};

	if (sqlite3_column_database_name(pStmt, i) != NULL &&
	    sqlite3_column_table_name(pStmt, i) != NULL)
	{
		if ((strcmp(sqlite3_column_origin_name(pStmt, i), "user_ts") == 0) &&
		    (strcmp(sqlite3_column_table_name(pStmt, i), "readings") == 0) &&
		    (strlen((char *) sqlite3_column_text(pStmt, i)) == 32))
		{
			// Extract milliseconds and microseconds for the user_ts field of the readings table
			formatStmt = string("SELECT strftime('");
			formatStmt += string(F_DATEH24_SEC);
			formatStmt += "', '" + string((char *) sqlite3_column_text(pStmt, i));
			formatStmt += "')";
			formatStmt += " || substr('" + string((char *) sqlite3_column_text(pStmt, i));
			formatStmt += "', instr('" + string((char *) sqlite3_column_text(pStmt, i));
			formatStmt += "', '.'), 7)";

			apply_format = true;
		}
		else
		{
			/**
			 * Handle here possible unformatted DATETIME column type
			 * If (column_name == column_original_name) AND
			 * (sqlite3_column_table_name() == "DATETIME")
			 * we assume the column has not been formatted
			 * by any datetime() or strftime() SQLite function.
			 * Thus we apply default FLEDGE formatting:
			 * "%Y-%m-%d %H:%M:%f"
			 */
			if (sqlite3_column_database_name(pStmt, i) != NULL &&
			    sqlite3_column_table_name(pStmt, i) != NULL &&
			    (strcmp(sqlite3_column_origin_name(pStmt, i),
				    sqlite3_column_name(pStmt, i)) == 0))
			{
				const char *pzDataType;
				int retType = sqlite3_table_column_metadata(dbHandle,
							sqlite3_column_database_name(pStmt, i),
							sqlite3_column_table_name(pStmt, i),
							sqlite3_column_name(pStmt, i),
							&pzDataType,
							NULL, NULL, NULL, NULL);

				// Check whether to Apply dateformat
				if (pzDataType != NULL &&
				    retType == SQLITE_OK &&
				    strcmp(pzDataType, SQLITE3_FLEDGE_DATETIME_TYPE) == 0 &&
				    strcmp(sqlite3_column_origin_name(pStmt, i),
					   sqlite3_column_name(pStmt, i)) == 0)
				{
					// Column metadata found and column datatype is "pzDataType"
					formatStmt = string("SELECT strftime('");
					formatStmt += string(F_DATEH24_MS);
					string columnText ((char *) sqlite3_column_text(pStmt, i));
					// Avoid double-formatting a value that already
					// contains a strftime() expression
					if (columnText.find("strftime") != string::npos)
					{
						formatStmt += "', " + columnText + ")";
					}
					else
					{
						formatStmt += "', '" + columnText + "')";
					}

					apply_format = true;
				}
				else
				{
					// Format not done
					// Just log the error if present
					if (retType != SQLITE_OK)
					{
						Logger::getLogger()->error("SQLite3 failed " \
								"to call sqlite3_table_column_metadata() " \
								"for column '%s'",
								sqlite3_column_name(pStmt, i));
					}
				}
			}
		}
	}

	if (apply_format)
	{
		char* zErrMsg = NULL;
		// New formatted data
		char formattedData[100] = "";

		// Exec the format SQL
		int rc = SQLexec(dbHandle,
				 "date",
				 formatStmt.c_str(),
				 dateCallback,
				 formattedData,
				 &zErrMsg);

		if (rc == SQLITE_OK )
		{
			// Use new formatted datetime value
			newDate.assign(formattedData);

			return true;
		}
		else
		{
			Logger::getLogger()->error("SELECT dateformat '%s': error %s",
						   formatStmt.c_str(),
						   zErrMsg);
			sqlite3_free(zErrMsg);
		}
	}

	return false;
}

/**
 * Apply the specified date format
 * using the available formats in SQLite3
 * for a specific column
 *
 * If the requested format is not available
 * the input column is used as is.
 * Additionally milliseconds could be rounded
 * upon request.
 * The routine return false if date format is not
 * found and the caller might decide to raise an error
 * or use the non formatted value
 *
 * @param inFormat	Input date format from application
 * @param colName	The column name to format
 * @param outFormat	The formatted column
 * @return		True if format has been applied or
 *			false if no format is in use.
 */
bool applyColumnDateFormat(const string& inFormat,
			   const string& colName,
			   string& outFormat,
			   bool roundMs)
{
bool retCode;

	// Get format, if any, from the supported formats map
	const string format = sqliteDateFormat[inFormat];
	if (!format.empty())
	{
		// Apply found format via SQLite3 strftime()
		outFormat.append("strftime('");
		outFormat.append(format);
		outFormat.append("', ");

		// Check whether we have to round milliseconds
		if (roundMs == true &&
		    format.back() == 'f')
		{
			outFormat.append("cast(round((julianday(");
			outFormat.append(colName);
			outFormat.append(") - 2440587.5)*86400 -0.00005, 3) AS FLOAT), 'unixepoch'");
		}
		else
		{
			outFormat.append(colName);
		}

		outFormat.append(" )");
		retCode = true;
	}
	else
	{
		// Use column as is
		outFormat.append(colName);
		retCode = false;
	}

	return retCode;
}

/**
 * Apply the specified date format
 * using the available formats in SQLite3
 * for a specific column
 *
 * If the requested format is not available
 * the input column is used as is.
 * Additionally milliseconds could be rounded
 * upon request.
 * The routine return false if date format is not
 * found and the caller might decide to raise an error
 * or use the non formatted value
 *
 * @param inFormat	Input date format from application
 * @param colName	The column name to format
 * @param outFormat	The formatted column
 * @return		True if format has been applied or
 *			false if no format is in use.
*/ bool applyColumnDateFormatLocaltime(const string& inFormat, const string& colName, string& outFormat, bool roundMs) { bool retCode; // Get format, if any, from the supported formats map const string format = sqliteDateFormat[inFormat]; if (!format.empty()) { // Apply found format via SQLite3 strftime() outFormat.append("strftime('"); outFormat.append(format); outFormat.append("', "); // Check whether we have to round milliseconds if (roundMs == true && format.back() == 'f') { outFormat.append("cast(round((julianday("); outFormat.append(colName); outFormat.append(") - 2440587.5)*86400 -0.00005, 3) AS FLOAT), 'unixepoch'"); } else { outFormat.append(colName); } outFormat.append(", 'localtime')"); retCode = true; } else { // Use column as is outFormat.append(colName); retCode = false; } return retCode; } /** * Apply the specified date format * using the available formats in SQLite3 * * @param inFormat Input date format from application * @param outFormat The formatted column * @return True if format has been applied or * false */ bool applyDateFormat(const string& inFormat, string& outFormat) { bool retCode; // Get format, if any, from the supported formats map const string format = sqliteDateFormat[inFormat]; if (!format.empty()) { // Apply found format via SQLite3 strftime() outFormat.append("strftime('"); outFormat.append(format); outFormat.append("', "); return true; } else { return false; } } #ifndef SQLITE_SPLIT_READINGS /** * Create a SQLite3 database connection */ Connection::Connection(ConnectionManager *manager) : m_manager(manager) { string dbPath, dbPathReadings; const char *defaultConnection = getenv("DEFAULT_SQLITE_DB_FILE"); const char *defaultReadingsConnection = getenv("DEFAULT_SQLITE_DB_READINGS_FILE"); m_logSQL = false; m_queuing = 0; m_streamOpenTransaction = true; if (defaultConnection == NULL) { // Set DB base path dbPath = getDataDir(); // Add the filename dbPath += _DB_NAME; } else { dbPath = defaultConnection; } if 
(defaultReadingsConnection == NULL) { // Set DB base path dbPathReadings = getDataDir(); // Add the filename dbPathReadings += READINGS_DB_FILE_NAME; } else { dbPathReadings = defaultReadingsConnection; } // Allow usage of URI for filename sqlite3_config(SQLITE_CONFIG_URI, 1); Logger *logger = Logger::getLogger(); /** * Make a connection to the database * and check backend connection was successfully made * Note: * we assume the database already exists, so the flag * SQLITE_OPEN_CREATE is not added in sqlite3_open_v2 call */ if (sqlite3_open_v2(dbPath.c_str(), &dbHandle, SQLITE_OPEN_READWRITE | SQLITE_OPEN_NOMUTEX, NULL) != SQLITE_OK) { const char* dbErrMsg = sqlite3_errmsg(dbHandle); const char* errMsg = "Failed to open the SQLite3 database"; logger->error("%s '%s': %s", dbErrMsg, dbPath.c_str(), dbErrMsg); connectErrorTime = time(0); raiseError("Connection", "%s '%s': '%s'", dbErrMsg, dbPath.c_str(), dbErrMsg); sqlite3_close_v2(dbHandle); dbHandle = NULL; } else { int rc; char *zErrMsg = NULL; // Enable the WAL for the fledge DB rc = sqlite3_exec(dbHandle, m_manager->getDBConfiguration().c_str(), NULL, NULL, &zErrMsg); if (rc != SQLITE_OK) { string errMsg = "Failed to set WAL from the fledge DB - " + m_manager->getDBConfiguration(); logger->error("%s : error %s", m_manager->getDBConfiguration().c_str(), zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); } /* * Build the ATTACH DATABASE command in order to get * 'fledge.' 
prefix in all SQL queries
		 */
		SQLBuffer attachDb;
		attachDb.append("ATTACH DATABASE '");
		attachDb.append(dbPath + "' AS fledge;");

		const char *sqlStmt = attachDb.coalesce();

		// Exec the statement
		rc = SQLexec(dbHandle, "database", sqlStmt, NULL, NULL, &zErrMsg);

		// Check result
		if (rc != SQLITE_OK)
		{
			const char* errMsg = "Failed to attach 'fledge' database in";
			logger->error("%s '%s': error %s",
				      errMsg,
				      sqlStmt,
				      zErrMsg);
			connectErrorTime = time(0);

			sqlite3_free(zErrMsg);
			sqlite3_close_v2(dbHandle);
		}
		else
		{
			logger->info("Connected to SQLite3 database: %s",
				     dbPath.c_str());
		}
		//Release sqlStmt buffer
		delete[] sqlStmt;

		// Attach readings database - readings_1
		// NOTE(review): an unreadable readings file is treated as
		// "a separate readings plugin provides readings storage",
		// not as an error
		if (access(dbPathReadings.c_str(), R_OK) != 0)
		{
			logger->info("No readings database, assuming seperate readings plugin is avialable");
			m_noReadings = true;
		}
		else
		{
			m_noReadings = false;

			SQLBuffer attachReadingsDb;
			attachReadingsDb.append("ATTACH DATABASE '");
			attachReadingsDb.append(dbPathReadings + "' AS readings_1;");

			const char *sqlReadingsStmt = attachReadingsDb.coalesce();

			// Exec the statement
			rc = SQLexec(dbHandle, "database", sqlReadingsStmt, NULL, NULL, &zErrMsg);

			// Check result
			if (rc != SQLITE_OK)
			{
				const char* errMsg = "Failed to attach 'readings' database in";
				logger->error("%s '%s': error %s",
					      errMsg,
					      sqlReadingsStmt,
					      zErrMsg);
				connectErrorTime = time(0);

				sqlite3_free(zErrMsg);
				sqlite3_close_v2(dbHandle);
			}
			else
			{
				logger->info("Connected to SQLite3 database: %s",
					     dbPath.c_str());
			}
			//Release sqlStmt buffer
			delete[] sqlReadingsStmt;

			// Enable the WAL for the readings DB
			rc = sqlite3_exec(dbHandle, m_manager->getDBConfiguration().c_str(), NULL, NULL, &zErrMsg);
			if (rc != SQLITE_OK)
			{
				string errMsg = "Failed to set WAL from the readings DB - " + m_manager->getDBConfiguration();
				Logger::getLogger()->error("%s : error %s",
							   errMsg.c_str(),
							   zErrMsg);
				connectErrorTime = time(0);
				sqlite3_free(zErrMsg);
			}

			ReadingsCatalogue *catalogue = ReadingsCatalogue::getInstance();
			catalogue->createReadingsOverflowTable(dbHandle, 1);
		}
	}

	if (!m_noReadings)
	{
		// Attach all the defined/used databases
		ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();
		if ( !readCat->connectionAttachAllDbs(dbHandle) )
		{
			const char* errMsg = "Failed to attach all the databases to the connection database in";
			logger->error(errMsg);
			connectErrorTime = time(0);
			sqlite3_close_v2(dbHandle);
			// NOTE(review): this throws a *pointer* (new runtime_error);
			// callers must catch runtime_error* - confirm intended
			throw new runtime_error(errMsg);
		}
		else
		{
			logger->info("Attached all %d readings databases to connection",
				     readCat->getReadingsCount());
		}
	}
	else
	{
		logger->info("Connection will not attach to readings tables");
	}

	m_schemaManager = SchemaManager::getInstance();
}
#endif

/**
 * Destructor for the database connection.
 * Close the connection to SQLite3 db
 */
Connection::~Connection()
{
	sqlite3_close_v2(dbHandle);
}

/**
 * Enable or disable the tracing of SQL statements
 *
 * @param flag	Desired state of the SQL trace flag
 */
void Connection::setTrace(bool flag)
{
	m_logSQL = flag;
}

/**
 * Map a SQLite3 result set to a string version of a JSON document
 *
 * @param res		Sqlite3 result set
 * @param resultSet	Output Json as string
 * @param rowsCount	Optional output: number of rows mapped (may be nullptr)
 * @return		SQLite3 result code of sqlite3_step(res)
 *
 */
int Connection::mapResultSet(void* res, string& resultSet, unsigned long *rowsCount)
{
	// Cast to SQLite3 result set
	sqlite3_stmt* pStmt = (sqlite3_stmt *)res;
	// JSON generic document
	Document doc;
	// SQLite3 return code
	int rc;
	// Number of returned rows, number of columns
	unsigned long nRows = 0, nCols = 0;

	// Create the JSON document
	doc.SetObject();
	// Get document allocator
	Document::AllocatorType& allocator = doc.GetAllocator();
	// Create the array for returned rows
	Value rows(kArrayType);
	// Rows counter, set it to 0 now
	Value count;
	count.SetInt(0);

	// Iterate over all the rows in the resultSet
	while ((rc = SQLstep(pStmt)) == SQLITE_ROW)
	{
		// Get number of columns for current row
		nCols = sqlite3_column_count(pStmt);
		// Create the 'row' object
		Value row(kObjectType);

		// Build the row with all fields
		for (int i = 0; i < nCols; i++)
		{
			// JSON document for the current row
			Document d;
			// Set object name as the column name
			Value name(sqlite3_column_name(pStmt, i), allocator);
			// Get the "TEXT" value of the column value
			char* str = (char *)sqlite3_column_text(pStmt, i);

			// Check the column value datatype
			switch (sqlite3_column_type(pStmt, i))
			{
				case (SQLITE_NULL):
				{
					row.AddMember(name, "", allocator);
					break;
				}
				case (SQLITE3_TEXT):
				{
					/**
					 * Handle here possible unformatted DATETIME column type
					 */
					string newDate;
					if (applyColumnDateTimeFormat(pStmt, i, newDate))
					{
						// Use new formatted datetime value
						str = (char *)newDate.c_str();
					}

					Value value;
					if (!d.Parse(str).HasParseError())
					{
						if (d.IsNumber())
						{
							// Set string
							value = Value(str, allocator);
						}
						else
						{
							// JSON parsing ok, use the document
							// if string value is not "null", "true", "false"
							if (strcmp(str, "null") != 0 &&
							    strcmp(str, "true") != 0 &&
							    strcmp(str, "false") != 0)
							{
								value = Value(d, allocator);
							}
							else
							{
								// Use (char *) value for "null", "true", "false"
								value = Value(str, allocator);
							}
						}
					}
					else
					{
						// Use (char *) value
						value = Value(str, allocator);
					}
					// Add name & value to the current row
					row.AddMember(name, value, allocator);
					break;
				}
				case (SQLITE_INTEGER):
				{
					int64_t intVal = atol(str);
					// Add name & value to the current row
					row.AddMember(name, intVal, allocator);
					break;
				}
				case (SQLITE_FLOAT):
				{
					double dblVal = atof(str);
					// Add name & value to the current row
					row.AddMember(name, dblVal, allocator);
					break;
				}
				default:
				{
					// Default: use (char *) value
					Value value(str != NULL ?
str : "", allocator); // Add name & value to the current row row.AddMember(name, value, allocator); break; } } } // All fields added: increase row counter nRows++; // Add the current row to the all rows object rows.PushBack(row, allocator); } if (rowsCount != nullptr) { *rowsCount = nRows; } // All rows added: update rows count count.SetInt(nRows); // Add 'rows' and 'count' to the final JSON document doc.AddMember("count", count, allocator); doc.AddMember("rows", rows, allocator); /* Write out the JSON document we created */ StringBuffer buffer; Writer<StringBuffer> writer(buffer); doc.Accept(writer); // Set the result as a CPP string resultSet = buffer.GetString(); // Return SQLite3 ret code return rc; } /** * This SQLIte3 query callback just returns the number of rows seen * by a SELECT statement in the 'data' parameter * * @param data Output parameter to update with number of rows * @param nCols The number of columns or the row * @param colValues The column values * @param colNames The column names * @return 0 on success, 1 otherwise */ int selectCallback(void *data, int nCols, char **colValues, char **colNames) { int *nRows = (int *)data; // Increment the number of rows seen *nRows++; // Set OK return 0; } /** * This SQLIte3 query count callback just returns the number of rows * as per 'count(*)' column * by a SELECT statement in the 'data' parameter * * @param data Output parameter to update with number of rows * @param nCols The number of columns or the row * @param colValues The column values * @param colNames The column names * @return 0 on success, 1 otherwise */ int countCallback(void *data, int nCols, char **colValues, char **colNames) { int *nRows = (int *)data; // Return the value of the first column: the count(*) *nRows = atoi(colValues[0]); // Set OK return 0; } /** * This SQLIte3 query rowid callback just returns the rowid * by a SELECT statement in the 'data' parameter * * @param data Output parameter to update with rowid * @param nCols The number 
of columns or the row
 * @param colValues	The column values
 * @param colNames	The column names
 * @return		0 on success, 1 otherwise
 */
int rowidCallback(void *data,
		  int nCols,
		  char **colValues,
		  char **colNames)
{
	unsigned long *rowid = (unsigned long *)data;

	// Return the value of the first column: the count(*)
	if (colValues[0])
		*rowid = strtoul(colValues[0], NULL, 10);
	else
		*rowid = 0;

	// Set OK
	return 0;
}

#ifndef SQLITE_SPLIT_READINGS
/**
 * Perform a query against a common table
 *
 * Builds a SELECT statement from the JSON payload in 'condition'
 * (aggregate / join / return / where / modifier clauses) and maps
 * the result set to a JSON string in 'resultSet'.
 */
bool Connection::retrieve(const string& schema,
			  const string& table,
			  const string& condition,
			  string& resultSet)
{
// Default template parameter uses UTF8 and MemoryPoolAllocator.
Document	document;
SQLBuffer	sql;
// Extra constraints to add to where clause
SQLBuffer	jsonConstraints;
bool		isOptAggregate = false;
vector<string>  asset_codes;

	if (!m_schemaManager->exists(dbHandle, schema))
	{
		raiseError("retrieve", "Schema %s does not exist, unable to retrieve from table %s", schema.c_str(), table.c_str());
		return false;
	}
	try {
		if (dbHandle == NULL)
		{
			raiseError("retrieve", "No SQLite3 db connection available");
			return false;
		}

		if (condition.empty())
		{
			// No condition: plain full-table select
			sql.append("SELECT * FROM ");
			sql.append(schema);
			sql.append('.');
			sql.append(table);
		}
		else
		{
			if (document.Parse(condition.c_str()).HasParseError())
			{
				raiseError("retrieve", "Failed to parse JSON payload");
				return false;
			}
			if (document.HasMember("aggregate"))
			{
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				if (!jsonAggregates(document, document["aggregate"], sql, jsonConstraints, isOptAggregate, false))
				{
					return false;
				}
				sql.append(" FROM ");
				sql.append(schema);
				sql.append('.');
			}
			else if (document.HasMember("join"))
			{
				sql.append("SELECT ");
				selectColumns(document, sql, 0);
			}
			else if (document.HasMember("return"))
			{
				int col = 0;
				Value& columns = document["return"];
				if (!
columns.IsArray())
				{
					raiseError("retrieve",
						   "The property return must be an array");
					return false;
				}
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				// Build the explicit column list, honouring per-column
				// format / timezone / alias options
				for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr)
				{
					if (col)
						sql.append(", ");
					if (!itr->IsObject())	// Simple column name
					{
						sql.append(itr->GetString());
					}
					else
					{
						if (itr->HasMember("column"))
						{
							if (! (*itr)["column"].IsString())
							{
								raiseError("rerieve",
									   "column must be a string");
								return false;
							}
							if (itr->HasMember("format"))
							{
								if (! (*itr)["format"].IsString())
								{
									raiseError("rerieve",
										   "format must be a string");
									return false;
								}

								// SQLite 3 date format.
								string new_format;
								applyColumnDateFormat((*itr)["format"].GetString(),
										      (*itr)["column"].GetString(),
										      new_format, true);
								// Add the formatted column or use it as is
								sql.append(new_format);
							}
							else if (itr->HasMember("timezone"))
							{
								if (! (*itr)["timezone"].IsString())
								{
									raiseError("rerieve",
										   "timezone must be a string");
									return false;
								}
								// SQLite3 doesnt support time zone formatting
								// only UTC is accepted here
								if (strcasecmp((*itr)["timezone"].GetString(), "utc") != 0)
								{
									raiseError("retrieve",
										   "SQLite3 plugin does not support timezones in qeueries");
									return false;
								}
								else
								{
									sql.append("strftime('" F_DATEH24_MS "', ");
									sql.append((*itr)["column"].GetString());
									sql.append(", 'utc')");
								}
							}
							else
							{
								sql.append((*itr)["column"].GetString());
							}
							sql.append(' ');
						}
						else if (itr->HasMember("json"))
						{
							const Value& json = (*itr)["json"];
							if (!returnJson(json, sql, jsonConstraints))
								return false;
						}
						else
						{
							raiseError("retrieve",
								   "return object must have either a column or json property");
							return false;
						}

						if (itr->HasMember("alias"))
						{
							sql.append(" AS \"");
							sql.append((*itr)["alias"].GetString());
							sql.append('"');
						}
					}
					col++;
				}
				sql.append(" FROM ");
				sql.append(schema);
				sql.append('.');
			}
			else
			{
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				sql.append(" * FROM ");
				sql.append(schema);
				sql.append('.');
			}
			if (document.HasMember("join"))
			{
				// Join: alias the base table as t0 and append
				// the joined tables as t1, t2, ...
				sql.append(" FROM ");
				sql.append(schema);
				sql.append('.');
				sql.append(table);
				sql.append(" t0");
				appendTables(schema, document, sql, 1);
			}
			else
			{
				sql.append(table);
			}
			if (document.HasMember("where"))
			{
				sql.append(" WHERE ");

				if (document.HasMember("join"))
				{
					if (!jsonWhereClause(document["where"], sql, asset_codes, false, "t0."))
					{
						return false;
					}
					// Now and the join condition itself
					string col0, col1;
					const Value& join = document["join"];
					if (join.HasMember("on") && join["on"].IsString())
					{
						col0 = join["on"].GetString();
					}
					else
					{
						raiseError("rerieve", "Missing on item");
						return false;
					}
					if (join.HasMember("table"))
					{
						const Value& table = join["table"];
						if (table.HasMember("column") && table["column"].IsString())
						{
							col1 = table["column"].GetString();
						}
						else
						{
							raiseError("QueryTable",
								   "Missing column in join table");
							return false;
						}
					}
					sql.append(" AND t0.");
					sql.append(col0);
					sql.append(" = t1.");
					sql.append(col1);
					sql.append(" ");
					if (join.HasMember("query") && join["query"].IsObject())
					{
						sql.append("AND ");
						const Value& query = join["query"];
						processJoinQueryWhereClause(query, sql, asset_codes, 1);
					}
				}
				else if (document.HasMember("where"))
				{
					if (!jsonWhereClause(document["where"], sql, asset_codes, false))
					{
						raiseError("retrieve",
							   "Failed to add where clause");
						return false;
					}
				}
				else
				{
					// NOTE(review): unreachable - guarded by the outer
					// HasMember("where") check above
					raiseError("retrieve",
						   "JSON does not contain where clause");
					return false;
				}
				if (!
jsonConstraints.isEmpty())
				{
					// Append the JSON NULL checks collected while
					// building json column expressions
					sql.append(" AND ");
					const char *jsonBuf = jsonConstraints.coalesce();
					sql.append(jsonBuf);
					delete[] jsonBuf;
				}
			}
			if (!jsonModifiers(document, sql, false))
			{
				raiseError("query", "Modifiers failed");
				return false;
			}
		}
		sql.append(';');

		const char *query = sql.coalesce();
		char *zErrMsg = NULL;
		int rc;
		sqlite3_stmt *stmt = NULL;

		logSQL("CommonRetrive", query);

		// Prepare the SQL statement and get the result set
		rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL);
		if (rc != SQLITE_OK)
		{
			raiseError("retrieve", sqlite3_errmsg(dbHandle));
			Logger::getLogger()->error("SQL statement: %s", query);
			delete[] query;
			if (stmt)
			{
				sqlite3_finalize(stmt);
			}
			return false;
		}

		// Call result set mapping
		rc = mapResultSet(stmt, resultSet);

		// Delete result set
		sqlite3_finalize(stmt);

		// Check result set mapping errors
		if (rc != SQLITE_DONE)
		{
			raiseError("retrieve", sqlite3_errmsg(dbHandle));
			Logger::getLogger()->error("SQL statement: %s", query);
			delete[] query;
			// Failure
			return false;
		}
		// Release memory for 'query' var
		delete[] query;
		// Success
		return true;
	// NOTE(review): catches std::exception by value (slices derived
	// exception types) - consider catching by const reference
	} catch (exception e) {
		raiseError("retrieve", "Internal error: %s", e.what());
	}
	return false;
}
#endif

#ifndef SQLITE_SPLIT_READINGS
/**
 * Insert data into a table
 *
 * The payload is either a single JSON object of column/value pairs or
 * a document with an "inserts" array of such objects.  Each insert is
 * executed with a prepared statement inside its own transaction.
 *
 * @return number of inserted rows on success, -1 on failure
 */
int Connection::insert(const std::string& schema,
		       const std::string& table,
		       const std::string& data)
{
Document	document;
ostringstream convert;
sqlite3_stmt *stmt = NULL;
int rc;
std::size_t arr = data.find("inserts");

	if (!m_schemaManager->exists(dbHandle, schema))
	{
		raiseError("insert", "Schema %s does not exist, unable to insert into table %s", schema.c_str(), table.c_str());
		// NOTE(review): returns false (0) from an int function, while
		// other failure paths return -1 - confirm intended
		return false;
	}

	// Check first the 'inserts' property in JSON data
	bool stdInsert = (arr == std::string::npos || arr > 8);

	// If input data is not an array of iserts
	// create an array with one element
	if (stdInsert)
	{
		convert << "{ \"inserts\" : [ ";
		convert << data;
		convert << " ] }";
	}

	if (document.Parse(stdInsert ?
convert.str().c_str() : data.c_str()).HasParseError()) { raiseError("insert", "Failed to parse JSON payload\n"); return -1; } // Get the array with row(s) Value &inserts = document["inserts"]; if (!inserts.IsArray()) { raiseError("insert", "Payload is missing the inserts array"); return -1; } // Number of inserts int ins = 0; int failedInsertCount = 0; // Generate sql query for prepared statement for (Value::ConstValueIterator iter = inserts.Begin(); iter != inserts.End(); ++iter) { if (!iter->IsObject()) { raiseError("insert", "Each entry in the insert array must be an object"); return -1; } { int col = 0; SQLBuffer sql; SQLBuffer values; sql.append("INSERT INTO " + schema + "." + table + " ("); for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); itr != (*iter).MemberEnd(); ++itr) { // Append column name if (col) { sql.append(", "); } sql.append(itr->name.GetString()); col++; } sql.append(") VALUES ("); for ( auto i = 0 ; i < col; i++ ) { if (i) { sql.append(","); } sql.append("?"); } sql.append(");"); const char *query = sql.coalesce(); rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); if (rc != SQLITE_OK) { if (stmt) { sqlite3_finalize(stmt); } raiseError("insert", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); delete[] query; return -1; } delete[] query; // Bind columns with prepared sql query int columID = 1; for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); itr != (*iter).MemberEnd(); ++itr) { if (itr->value.IsString()) { const char *str = itr->value.GetString(); if (strcmp(str, "now()") == 0) { sqlite3_bind_text(stmt, columID, SQLITE3_NOW, -1, SQLITE_TRANSIENT); } else { sqlite3_bind_text(stmt, columID, str, -1, SQLITE_TRANSIENT); } } else if (itr->value.IsDouble()) { sqlite3_bind_double(stmt, columID,itr->value.GetDouble()); } else if (itr->value.IsInt64()) { sqlite3_bind_int(stmt, columID,(long)itr->value.GetInt64()); } else if (itr->value.IsInt()) { sqlite3_bind_int(stmt, 
columID, itr->value.GetInt());
				}
				else if (itr->value.IsObject())
				{
					// JSON object: bind its serialised form as text
					StringBuffer buffer;
					Writer<StringBuffer> writer(buffer);
					itr->value.Accept(writer);
					sqlite3_bind_text(stmt, columID, buffer.GetString(), -1, SQLITE_TRANSIENT);
				}
				columID++ ;
			}

			if (sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL) != SQLITE_OK)
			{
				if (stmt)
				{
					sqlite3_clear_bindings(stmt);
					sqlite3_reset(stmt);
					sqlite3_finalize(stmt);
				}
				raiseError("insert", sqlite3_errmsg(dbHandle));
				return -1;
			}

			// Track in-flight writes so readers can wait on db_cv
			m_writeAccessOngoing.fetch_add(1);
			int sqlite3_resut = SQLstep(stmt);
			m_writeAccessOngoing.fetch_sub(1);

			if (sqlite3_resut != SQLITE_DONE)
			{
				failedInsertCount++;
				raiseError("insert", sqlite3_errmsg(dbHandle));
				Logger::getLogger()->error("SQL statement: %s", sqlite3_expanded_sql(stmt));

				// transaction is still open, do rollback
				if (sqlite3_get_autocommit(dbHandle) == 0)
				{
					rc = sqlite3_exec(dbHandle,"ROLLBACK TRANSACTION;",NULL,NULL,NULL);
					if (rc != SQLITE_OK)
					{
						raiseError("insert rollback", sqlite3_errmsg(dbHandle));
					}
				}
			}

			sqlite3_clear_bindings(stmt);
			sqlite3_reset(stmt);

			if (sqlite3_resut == SQLITE_DONE &&
			    sqlite3_exec(dbHandle, "COMMIT TRANSACTION", NULL, NULL, NULL) != SQLITE_OK)
			{
				if (stmt)
				{
					sqlite3_finalize(stmt);
				}
				raiseError("insert", sqlite3_errmsg(dbHandle));
				return -1;
			}
			sqlite3_finalize(stmt);
		}

		// Increment row count
		// NOTE(review): ins counts processed inserts, including failed
		// ones; failures are reported via failedInsertCount below
		ins++;
	}

	if (m_writeAccessOngoing == 0)
		db_cv.notify_all();

	if (failedInsertCount)
	{
		char buf[100];
		snprintf(buf, sizeof(buf),
				"Not all inserts into table '%s.%s' within transaction succeeded",
				schema.c_str(), table.c_str());
		raiseError("insert", buf);
	}

	return (!failedInsertCount ? ins : -1);
}
#endif

#ifndef SQLITE_SPLIT_READINGS
/**
 * Perform an update against a common table
 * This routine uses SQLite 3 JSON1 extension:
 *
 *    json_set(field, '$.key.value', the_value)
 *
 */
int Connection::update(const string& schema,
		       const string& table,
		       const string& payload)
{
// Default template parameter uses UTF8 and MemoryPoolAllocator.
Document	document;
SQLBuffer	sql;
vector<string>  asset_codes;

bool		allowZero = false;
int		row = 0;
ostringstream	convert;
ostringstream	threadId;

	threadId << std::this_thread::get_id();

	if (!m_schemaManager->exists(dbHandle, schema))
	{
		raiseError("update", "Schema %s does not exist, unable to update table %s", schema.c_str(), table.c_str());
		// NOTE(review): returns false (0) from an int function, while
		// other failure paths return -1 - confirm intended
		return false;
	}

	// Normalise the payload: wrap a single update object into an
	// "updates" array so both forms are handled uniformly
	std::size_t arr = payload.find("updates");
	bool changeReqd = (arr == std::string::npos || arr > 8);
	if (changeReqd)
	{
		convert << "{ \"updates\" : [ ";
		convert << payload;
		convert << " ] }";
	}

	if (document.Parse(changeReqd?convert.str().c_str():payload.c_str()).HasParseError())
	{
		raiseError("update", "Failed to parse JSON payload");
		return -1;
	}
	else
	{
		Value &updates = document["updates"];
		if (!updates.IsArray())
		{
			raiseError("update", "Payload is missing the updates array");
			return -1;
		}

		// All UPDATE statements are packed into a single transaction
		sql.append("BEGIN TRANSACTION;");
		int i=0;
		for (Value::ConstValueIterator iter = updates.Begin(); iter != updates.End(); ++iter,++i)
		{
			if (!iter->IsObject())
			{
				raiseError("update",
					   "Each entry in the update array must be an object");
				return -1;
			}
			sql.append("UPDATE ");
			sql.append(schema);
			sql.append('.');
			sql.append(table);
			sql.append(" SET ");

			int col = 0;

			// Simple column = value assignments
			if ((*iter).HasMember("values"))
			{
				const Value& values = (*iter)["values"];
				for (Value::ConstMemberIterator itr = values.MemberBegin();
						itr != values.MemberEnd(); ++itr)
				{
					if (col != 0)
					{
						sql.append( ", ");
					}
					sql.append(itr->name.GetString());
					sql.append(" = ");

					if (itr->value.IsString())
					{
						const char *str = itr->value.GetString();
						if (strcmp(str, "now()") == 0)
						{
							sql.append(SQLITE3_NOW);
						}
						else
						{
							sql.append('\'');
							sql.append(escape(str));
							sql.append('\'');
						}
					}
					else if (itr->value.IsDouble())
						sql.append(itr->value.GetDouble());
					else if (itr->value.IsUint64())
						sql.append((unsigned long)itr->value.GetUint64());
					else if (itr->value.IsInt64())
						sql.append((long)itr->value.GetInt64());
					else if (itr->value.IsObject())
					{
						StringBuffer buffer;
						Writer<StringBuffer> writer(buffer);
						itr->value.Accept(writer);
						sql.append('\'');
						sql.append(escape(buffer.GetString()));
						sql.append('\'');
					}
					// Handle JSON value null: "item" : null
					else if (itr->value.IsNull())
					{
						sql.append("NULL");
					}

					col++;
				}
			}
			// column = column <operator> value expressions
			if ((*iter).HasMember("expressions"))
			{
				const Value& exprs = (*iter)["expressions"];
				if (!exprs.IsArray())
				{
					raiseError("update", "The property exressions must be an array");
					return -1;
				}
				for (Value::ConstValueIterator itr = exprs.Begin(); itr != exprs.End(); ++itr)
				{
					if (col != 0)
					{
						sql.append( ", ");
					}
					if (!itr->IsObject())
					{
						raiseError("update",
							   "expressions must be an array of objects");
						return -1;
					}
					if (!itr->HasMember("column"))
					{
						raiseError("update",
							   "Missing column property in expressions array item");
						return -1;
					}
					if (!itr->HasMember("operator"))
					{
						raiseError("update",
							   "Missing operator property in expressions array item");
						return -1;
					}
					if (!itr->HasMember("value"))
					{
						raiseError("update",
							   "Missing value property in expressions array item");
						return -1;
					}
					sql.append((*itr)["column"].GetString());
					sql.append(" = ");
					sql.append((*itr)["column"].GetString());
					sql.append(' ');
					sql.append((*itr)["operator"].GetString());
					sql.append(' ');
					const Value& value = (*itr)["value"];

					if (value.IsString())
					{
						const char *str = value.GetString();
						if (strcmp(str, "now()") == 0)
						{
							sql.append(SQLITE3_NOW);
						}
						else
						{
							// NOTE(review): raw string appended without
							// escape(), unlike the "values" branch above
							sql.append('\'');
							sql.append(str);
							sql.append('\'');
						}
					}
					else if (value.IsDouble())
						sql.append(value.GetDouble());
					else if (value.IsInt64())
						sql.append((long)value.GetInt64());
					else if (value.IsInt())
						sql.append(value.GetInt());
					else if (value.IsObject())
					{
						StringBuffer buffer;
						Writer<StringBuffer> writer(buffer);
						value.Accept(writer);
						sql.append('\'');
						sql.append(buffer.GetString());
						sql.append('\'');
					}
					col++;
				}
			}
			// JSON1 json_set() updates of keys inside a JSON column
			if ((*iter).HasMember("json_properties"))
			{
				const Value& exprs = (*iter)["json_properties"];
				if (!exprs.IsArray())
				{
					raiseError("update",
						   "The property json_properties must be an array");
					return -1;
				}
				for (Value::ConstValueIterator itr = exprs.Begin(); itr != exprs.End(); ++itr)
				{
					if (col != 0)
					{
						sql.append( ", ");
					}
					if (!itr->IsObject())
					{
						raiseError("update",
							   "json_properties must be an array of objects");
						return -1;
					}
					if (!itr->HasMember("column"))
					{
						raiseError("update",
							   "Missing column property in json_properties array item");
						return -1;
					}
					if (!itr->HasMember("path"))
					{
						raiseError("update",
							   "Missing path property in json_properties array item");
						return -1;
					}
					if (!itr->HasMember("value"))
					{
						raiseError("update",
							   "Missing value property in json_properties array item");
						return -1;
					}
					sql.append((*itr)["column"].GetString());

					// SQLite 3 JSON1 extension: json_set
					// json_set(field, '$.key.value', the_value)
					sql.append(" = json_set(");
					sql.append((*itr)["column"].GetString());
					sql.append(", '$.");

					const Value& path = (*itr)["path"];
					if (!path.IsArray())
					{
						raiseError("update",
							   "The property path must be an array");
						return -1;
					}

					// Build the dotted JSON path from the array elements
					int pathElement = 0;
					for (Value::ConstValueIterator itr2 = path.Begin();
						itr2 != path.End(); ++itr2)
					{
						if (pathElement > 0)
						{
							sql.append('.');
						}
						if (itr2->IsString())
						{
							sql.append(itr2->GetString());
						}
						else
						{
							raiseError("update",
								   "The elements of path must all be strings");
							return -1;
						}
						pathElement++;
					}
					sql.append("', ");
					const Value& value = (*itr)["value"];

					if (value.IsString())
					{
						const char *str = value.GetString();
						if (strcmp(str, "now()") == 0)
						{
							sql.append(SQLITE3_NOW);
						}
						else
						{
							sql.append('\'');
							sql.append(escape(str));
							sql.append('\'');
						}
					}
					else if (value.IsDouble())
					{
						sql.append(value.GetDouble());
					}
					else if (value.IsInt64())
					{
						sql.append((long)value.GetInt64());
					}
					else if (value.IsInt())
					{
						sql.append(value.GetInt());
					}
					else if (value.IsObject())
					{
						StringBuffer buffer;
						Writer<StringBuffer> writer(buffer);
						value.Accept(writer);
						sql.append('\'');
						sql.append(escape(buffer.GetString()));
						sql.append('\'');
					}
					sql.append(")");
					col++;
				}
			}
			// Optional modifiers, e.g. "allowzero" accepts updates
			// that touch no rows without reporting an error
			if (iter->HasMember("modifier") && (*iter)["modifier"].IsArray())
			{
				const Value& modifier = (*iter)["modifier"];
				for (Value::ConstValueIterator modifiers = modifier.Begin();
						modifiers != modifier.End(); ++modifiers)
				{
					if (modifiers->IsString())
					{
						string mod = modifiers->GetString();
						if (mod.compare("allowzero") == 0)
						{
							allowZero = true;
						}
					}
				}
			}
			if (col == 0)
			{
				raiseError("update",
					   "Missing values or expressions object in payload");
				return -1;
			}
			if ((*iter).HasMember("condition"))
			{
				sql.append(" WHERE ");
				if (!jsonWhereClause((*iter)["condition"], sql, asset_codes))
				{
					// NOTE(review): returns false (0), not -1,
					// like the other failure paths - confirm intended
					return false;
				}
			}
			else if ((*iter).HasMember("where"))
			{
				sql.append(" WHERE ");
				if (!jsonWhereClause((*iter)["where"], sql, asset_codes))
				{
					// NOTE(review): returns false (0), not -1,
					// like the other failure paths - confirm intended
					return false;
				}
			}
			sql.append(';');
			row++;
		}
	}
	sql.append("COMMIT TRANSACTION;");

	const char *query = sql.coalesce();
	logSQL("CommonUpdate", query);
	char *zErrMsg = NULL;
	int rc;

	// Exec the UPDATE statement: no callback, no result set
	m_writeAccessOngoing.fetch_add(1);
	rc = SQLexec(dbHandle, table,
		     query,
		     NULL,
		     NULL,
		     &zErrMsg);
	m_writeAccessOngoing.fetch_sub(1);
	if (m_writeAccessOngoing == 0)
		db_cv.notify_all();

	// Check result code
	if (rc != SQLITE_OK)
	{
		raiseError("update", zErrMsg);
		sqlite3_free(zErrMsg);
		if (sqlite3_get_autocommit(dbHandle)==0) // transaction is still open, do rollback
		{
			rc=SQLexec(dbHandle, table,
				   "ROLLBACK TRANSACTION;",
				   NULL,
				   NULL,
				   &zErrMsg);
			if (rc != SQLITE_OK)
			{
				raiseError("rollback", zErrMsg);
				sqlite3_free(zErrMsg);
			}
		}
		Logger::getLogger()->error("SQL statement: %s", query);
		// Release memory for 'query' var
		delete[] query;
		return -1;
	}
	else
	{
		// Release memory for 'query' var
		delete[] query;

		// sqlite3_changes reports rows affected by the most recent
		// statement only
		int update = sqlite3_changes(dbHandle);

		int return_value=0;

		if (update == 0 && allowZero == false)
		{
			char buf[100];
			snprintf(buf, sizeof(buf),
					"Not all updates of table '%s.%s' within transaction succeeded",
					schema.c_str(), table.c_str());
			raiseError("update", buf);
			return_value = -1;
		}
		else
		{
			return_value = (row == 1 ?
update : row);
		}

		// Returns the number of rows affected, cases :
		//
		// 1) update == 0, no update, returns -1
		// 2) single command SQL that could affects multiple rows, returns 'update'
		// 3) multiple SQL commands packed and executed in one SQLExec, returns 'row'
		return (return_value);
	}

	// Return failure
	// NOTE(review): unreachable - both branches above return
	return -1;
}
#endif

/**
 * Format a date to a fixed format with milliseconds, microseconds and
 * timezone expressed, examples :
 *
 *   case - formatted |2019-01-01 10:01:01.000000+00:00| date |2019-01-01 10:01:01|
 *   case - formatted |2019-02-01 10:02:01.000000+00:00| date |2019-02-01 10:02:01.0|
 *   case - formatted |2019-02-02 10:02:02.841000+00:00| date |2019-02-02 10:02:02.841|
 *   case - formatted |2019-02-03 10:02:03.123456+00:00| date |2019-02-03 10:02:03.123456|
 *   case - formatted |2019-03-01 10:03:01.100000+00:00| date |2019-03-01 10:03:01.1+00:00|
 *   case - formatted |2019-03-02 10:03:02.123000+00:00| date |2019-03-02 10:03:02.123+00:00|
 *   case - formatted |2019-03-03 10:03:03.123456+00:00| date |2019-03-03 10:03:03.123456+00:00|
 *   case - formatted |2019-03-04 10:03:04.123456+01:00| date |2019-03-04 10:03:04.123456+01:00|
 *   case - formatted |2019-03-05 10:03:05.123456-01:00| date |2019-03-05 10:03:05.123456-01:00|
 *   case - formatted |2019-03-04 10:03:04.123456+02:30| date |2019-03-04 10:03:04.123456+02:30|
 *   case - formatted |2019-03-05 10:03:05.123456-02:30| date |2019-03-05 10:03:05.123456-02:30|
 *
 * NOTE(review): formatted_date is filled with unchecked strcpy/strcat;
 * buffer_size is only honoured by the strftime path - callers must
 * supply a buffer large enough for the 32 character result.
 *
 * @param out	false if the date is invalid
 *
 */
bool Connection::formatDate(char *formatted_date, size_t buffer_size, const char *date)
{
	struct timeval tv = {0};
	struct tm tm  = {0};
	char *valid_date = nullptr;

	enum codeOptimization{CO_NONE, CO_01, CO_02, CO_03};
	codeOptimization opt;
	int len;

	// Code optimization for the cases:
	//
	// 2019-03-03 10:03:03.123456+00:00
	// 2019-02-02 10:02:02.841
	// 2019-01-01 10:01:01
	len = strlen(date);
	if (len == 32)
	{
		if ( date[19] == '.' &&
		     (date[26] == '-' || date[26] == '+')&&
		     date[29] == ':'
		   )
		{
			// Case - 2019-03-03 10:03:03.123456+00:00
			// Already fully qualified, copy as is
			strcpy(formatted_date, date);
			opt = CO_01;
		}
		else
			opt = CO_NONE;
	}
	else if (len == 23)
	{
		if ( date[19] == '.')
		{
			// Case - 2019-02-02 10:02:02.841
			// Pad microseconds and add default UTC offset
			strcpy(formatted_date, date);
			strcat(formatted_date, "000+00:00");
			opt = CO_02;
		}
		else
			opt = CO_NONE;
	}
	else if (len == 19)
	{
		// Case - 2019-01-01 10:01:01
		// Add zero fraction and default UTC offset
		strcpy(formatted_date, date);
		strcat(formatted_date, ".000000+00:00");
		opt = CO_03;
	}
	else
	{
		opt = CO_NONE;
	}

	if (opt != CO_NONE)
	{
		return (true);
	}

	// Slow path: parse the date and rebuild each component

	// Extract up to seconds
	memset(&tm, 0, sizeof(tm));
	valid_date = strptime(date, F_DATEH24_SEC, &tm);
	if (! valid_date)
	{
		return (false);
	}

	strftime (formatted_date, buffer_size, F_DATEH24_SEC, &tm);

	// Work out the microseconds from the fractional part of the seconds
	char fractional[10] = {0};
	sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%[0-9]*", fractional);
	// Truncate to max 6 digits
	fractional[6] = 0;
	int multiplier = 6 - (int)strlen(fractional);
	if (multiplier < 0)
		multiplier = 0;
	while (multiplier--)
		strcat(fractional, "0");

	strcat(formatted_date ,".");
	strcat(formatted_date ,fractional);

	// Handles timezone
	char timezone_hour[5] = {0};
	char timezone_min[5] = {0};
	char sign[2] = {0};

	// Try a negative offset first, then a positive one
	sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%*d-%2[0-9]:%2[0-9]", timezone_hour, timezone_min);
	if (timezone_hour[0] != 0)
	{
		strcat(sign, "-");
	}
	else
	{
		memset(timezone_hour, 0, sizeof(timezone_hour));
		memset(timezone_min, 0, sizeof(timezone_min));

		sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%*d+%2[0-9]:%2[0-9]", timezone_hour, timezone_min);
		if  (timezone_hour[0] != 0)
		{
			strcat(sign, "+");
		}
		else
		{
			// No timezone is expressed in the source date
			// the default UTC is added
			strcat(formatted_date, "+00:00");
		}
	}

	if (sign[0] != 0)
	{
		if (timezone_hour[0] != 0)
		{
			strcat(formatted_date, sign);

			// Pad with 0 if an hour having only 1 digit was provided
			// +1 -> +01
			if (strlen(timezone_hour) == 1)
				strcat(formatted_date, "0");

			strcat(formatted_date,
timezone_hour); strcat(formatted_date, ":"); } if (timezone_min[0] != 0) { strcat(formatted_date, timezone_min); // Pad with 0 if minutes having only 1 digit were provided // 3 -> 30 if (strlen(timezone_min) == 1) strcat(formatted_date, "0"); } else { // Minutes aren't expressed in the source date strcat(formatted_date, "00"); } } return (true); } #ifndef SQLITE_SPLIT_READINGS /** * Process the aggregate options and return the columns to be selected */ bool Connection::jsonAggregates(const Value& payload, const Value& aggregates, SQLBuffer& sql, SQLBuffer& jsonConstraint, bool &isOptAggregate, bool isTableReading, bool isExtQuery ) { string col; string column_name; isOptAggregate = false; if (aggregates.IsObject()) { if (! aggregates.HasMember("operation")) { raiseError("Select aggregation", "Missing property \"operation\""); return false; } if ((! aggregates.HasMember("column")) && (! aggregates.HasMember("json"))) { raiseError("Select aggregation", "Missing property \"column\" or \"json\""); return false; } string operation; // Handles the case of the count, the virtual tables should use count and the external the sun operation operation =aggregates["operation"].GetString(); if (isTableReading) { if (operation.compare("count") ==0) { isOptAggregate = true; if (isExtQuery) { operation = "sum"; } } } sql.append(operation); sql.append('('); if (aggregates.HasMember("column")) { col = aggregates["column"].GetString(); if (col.compare("*") == 0) // Faster to count ROWID rather than * { col = "ROWID"; sql.append(col); } else { // an operation different from the 'count' is requested if (isTableReading && (col.compare("user_ts") == 0) ) { sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'localtime') "); sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) "); } else { sql.append("\""); sql.append(col); sql.append("\""); } } } else if (aggregates.HasMember("json")) { const Value& json = aggregates["json"]; if (! 
json.IsObject())
			{
				raiseError("Select aggregation", "The json property must be an object");
				return false;
			}
			if (!json.HasMember("column"))
			{
				raiseError("retrieve", "The json property is missing a column property");
				return false;
			}
			// Use json_extract(field, '$.key1.key2') AS value
			sql.append("json_extract(");
			sql.append(json["column"].GetString());
			sql.append(", '$.");
			if (!json.HasMember("properties"))
			{
				raiseError("retrieve", "The json property is missing a properties property");
				return false;
			}
			const Value& jsonFields = json["properties"];
			if (jsonFields.IsArray())
			{
				if (! jsonConstraint.isEmpty())
				{
					jsonConstraint.append(" AND ");
				}
				// JSON1 SQLite3 extension 'json_type' object check:
				// json_type(field, '$.key1.key2') IS NOT NULL
				// Build the Json keys NULL check
				jsonConstraint.append("json_type(");
				jsonConstraint.append(json["column"].GetString());
				jsonConstraint.append(", '$.");
				int field = 0;
				string prev;
				// Walk the key path: sql gets key1.key2...; the constraint trails one key behind
				for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr)
				{
					if (field)
					{
						sql.append(".");
					}
					if (prev.length() > 0)
					{
						// Append Json field for NULL check
						jsonConstraint.append(prev);
						jsonConstraint.append(".");
					}
					prev = itr->GetString();
					field++;
					// Append Json field for query
					sql.append(itr->GetString());
				}
				// Add last Json key
				jsonConstraint.append(prev);
				// Add condition for all json keys not null
				jsonConstraint.append("') IS NOT NULL");
			}
			else
			{
				// Append Json field for query
				sql.append(jsonFields.GetString());
				if (! jsonConstraint.isEmpty())
				{
					jsonConstraint.append(" AND ");
				}
				// JSON1 SQLite3 extension 'json_type' object check:
				// json_type(field, '$.key1.key2') IS NOT NULL
				// Build the Json key NULL check
				jsonConstraint.append("json_type(");
				jsonConstraint.append(json["column"].GetString());
				jsonConstraint.append(", '$.");
				jsonConstraint.append(jsonFields.GetString());
				// Add condition for json key not null
				jsonConstraint.append("') IS NOT NULL");
			}
			sql.append("')");
		}
		sql.append(") AS \"");
		if (aggregates.HasMember("alias"))
		{
			// Handles the case of the count: the external query should use the alias and the internal the name of the field
			if (isTableReading)
			{
				if (isExtQuery)
					sql.append(aggregates["alias"].GetString());
				else
					sql.append(col);
			}
			else
				sql.append(aggregates["alias"].GetString());
		}
		else
		{
			// No alias given: synthesise one as operation_column
			sql.append(aggregates["operation"].GetString());
			sql.append('_');
			sql.append(aggregates["column"].GetString());
		}
		sql.append("\"");
	}
	else if (aggregates.IsArray())
	{
		// Multiple aggregates requested: emit a comma separated list
		int index = 0;
		for (Value::ConstValueIterator itr = aggregates.Begin(); itr != aggregates.End(); ++itr)
		{
			if (!itr->IsObject())
			{
				raiseError("select aggregation", "Each element in the aggregate array must be an object");
				return false;
			}
			if ((! itr->HasMember("column")) && (! itr->HasMember("json")))
			{
				raiseError("Select aggregation", "Missing property \"column\"");
				return false;
			}
			if (! itr->HasMember("operation"))
			{
				raiseError("Select aggregation", "Missing property \"operation\"");
				return false;
			}
			if (index)
				sql.append(", ");
			index++;
			sql.append((*itr)["operation"].GetString());
			sql.append('(');
			if (itr->HasMember("column"))
			{
				column_name= (*itr)["column"].GetString();
				if (isTableReading && (column_name.compare("user_ts") == 0) )
				{
					// user_ts on the virtual readings table: rebuild localtime with fraction
					sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'localtime') ");
					sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
				}
				else
				{
					sql.append("\"");
					sql.append(column_name);
					sql.append("\"");
				}
			}
			else if (itr->HasMember("json"))
			{
				const Value& json = (*itr)["json"];
				if (! json.IsObject())
				{
					raiseError("Select aggregation", "The json property must be an object");
					return false;
				}
				if (!json.HasMember("column"))
				{
					raiseError("retrieve", "The json property is missing a column property");
					return false;
				}
				if (!json.HasMember("properties"))
				{
					raiseError("retrieve", "The json property is missing a properties property");
					return false;
				}
				const Value& jsonFields = json["properties"];
				if (! jsonConstraint.isEmpty())
				{
					jsonConstraint.append(" AND ");
				}
				// Use json_extract(field, '$.key1.key2') AS value
				sql.append("json_extract(");
				column_name=json["column"].GetString();
				sql.append(column_name);
				sql.append(", '$.");
				// JSON1 SQLite3 extension 'json_type' object check:
				// json_type(field, '$.key1.key2') IS NOT NULL
				// Build the Json keys NULL check
				jsonConstraint.append("json_type(");
				jsonConstraint.append(json["column"].GetString());
				jsonConstraint.append(", '$.");
				if (jsonFields.IsArray())
				{
					string prev;
					// NOTE: this inner itr deliberately shadows the outer aggregate iterator
					for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr)
					{
						if (prev.length() > 0)
						{
							jsonConstraint.append(prev);
							jsonConstraint.append('.');
							sql.append('.');
						}
						// Append Json field for query
						sql.append(itr->GetString());
						prev = itr->GetString();
					}
					// Add last Json key
					jsonConstraint.append(prev);
					// Add condition for json key not null
					jsonConstraint.append("') IS NOT NULL");
				}
				else
				{
					// Append Json field for query
					sql.append(jsonFields.GetString());
					// JSON1 SQLite3 extension 'json_type' object check:
					// json_type(field, '$.key1.key2') IS NOT NULL
					// Build the Json key NULL check
					jsonConstraint.append(jsonFields.GetString());
					// Add condition for json key not null
					jsonConstraint.append("') IS NOT NULL");
				}
				sql.append("')");
			}
			sql.append(") AS \"");
			if (itr->HasMember("alias"))
			{
				// Count optimisation: external query uses the alias, internal the column name
				if (itr->HasMember("alias"))
					;
				if (isTableReading)
				{
					if (isExtQuery)
						sql.append((*itr)["alias"].GetString());
					else
						sql.append(column_name);
				}
				else
					sql.append((*itr)["alias"].GetString());
			}
			else
			{
				sql.append((*itr)["operation"].GetString());
				sql.append('_');
				sql.append((*itr)["column"].GetString());
			}
			sql.append("\"");
		}
	}
	if (payload.HasMember("group"))
	{
		sql.append(", ");
		if (payload["group"].IsObject())
		{
			const Value& grp = payload["group"];
			if (grp.HasMember("format"))
			{
				// SQLite 3 date format.
				string new_format;
				if (isTableReading)
				{
					applyColumnDateFormatLocaltime(grp["format"].GetString(), grp["column"].GetString(), new_format);
				}
				else
				{
					applyColumnDateFormat(grp["format"].GetString(), grp["column"].GetString(), new_format);
				}
				// Add the formatted column or use it as is
				sql.append(new_format);
			}
			else
			{
				sql.append(grp["column"].GetString());
			}
			if (grp.HasMember("alias"))
			{
				sql.append(" AS \"");
				sql.append(grp["alias"].GetString());
				sql.append("\"");
			}
			else
			{
				// Default alias is the column name itself
				sql.append(" AS \"");
				sql.append(grp["column"].GetString());
				sql.append("\"");
			}
		}
		else
		{
			sql.append(payload["group"].GetString());
		}
	}
	if (payload.HasMember("timebucket"))
	{
		const Value& tb = payload["timebucket"];
		if (! tb.IsObject())
		{
			raiseError("Select data", "The \"timebucket\" property must be an object");
			return false;
		}
		if (! tb.HasMember("timestamp"))
		{
			raiseError("Select data", "The \"timebucket\" object must have a timestamp property");
			return false;
		}
		if (tb.HasMember("format"))
		{
			// SQLite 3 date format is limited.
			string new_format;
			if (applyDateFormat(tb["format"].GetString(), new_format))
			{
				sql.append(", ");
				// Add the formatted column
				sql.append(new_format);
				if (tb.HasMember("size"))
				{
					// Use Unix epoch, without microseconds
					sql.append(tb["size"].GetString());
					sql.append(" * round(");
					sql.append("strftime('%s', ");
					sql.append(tb["timestamp"].GetString());
					sql.append(") / ");
					sql.append(tb["size"].GetString());
					sql.append(", 6)");
				}
				else
				{
					sql.append(tb["timestamp"].GetString());
				}
				sql.append(", 'unixepoch')");
			}
			else
			{
				/**
				 * No date format found: we should return an error.
				 * Note: currently if input Json payload has no 'result' member
				 * raiseError() results in no data being sent to the client
				 * We use Unix epoch without microseconds
				 */
				sql.append(", datetime(");
				if (tb.HasMember("size"))
				{
					sql.append(tb["size"].GetString());
					sql.append(" * round(");
				}
				// Use Unix epoch, without microseconds
				sql.append("strftime('%s', ");
				sql.append(tb["timestamp"].GetString());
				if (tb.HasMember("size"))
				{
					sql.append(") / ");
					sql.append(tb["size"].GetString());
					sql.append(", 6)");
				}
				else
				{
					sql.append(")");
				}
				sql.append(", 'unixepoch')");
			}
		}
		else
		{
			sql.append(", datetime(");
			if (tb.HasMember("size"))
			{
				sql.append(tb["size"].GetString());
				sql.append(" * round(");
			}
			/*
			 * Default format when no format is specified:
			 * - we use Unix time without milliseconds.
			 */
			sql.append("strftime('%s', ");
			sql.append(tb["timestamp"].GetString());
			if (tb.HasMember("size"))
			{
				sql.append(") / ");
				sql.append(tb["size"].GetString());
				sql.append(", 6)");
			}
			else
			{
				sql.append(")");
			}
			sql.append(", 'unixepoch')");
		}
		sql.append(" AS \"");
		if (tb.HasMember("alias"))
		{
			sql.append(tb["alias"].GetString());
		}
		else
		{
			sql.append("timestamp");
		}
		sql.append('"');
	}
	return true;
}
#endif

/**
 * Process the modifiers for limit, skip, sort and group
 */
bool Connection::jsonModifiers(const Value& payload, SQLBuffer& sql, bool isTableReading)
{
	if (payload.HasMember("timebucket") && payload.HasMember("sort"))
	{
		raiseError("query modifiers", "Sort and timebucket modifiers can not be used in the same payload");
		return false;
	}
	if (payload.HasMember("group"))
	{
		sql.append(" GROUP BY ");
		if (payload["group"].IsObject())
		{
			const Value& grp = payload["group"];
			if (grp.HasMember("format"))
			{
				/**
				 * SQLite 3 date format is limited.
				 * Handle all available formats here.
*/ string new_format; if (isTableReading) { applyColumnDateFormatLocaltime(grp["format"].GetString(), grp["column"].GetString(), new_format); } else { applyColumnDateFormat(grp["format"].GetString(), grp["column"].GetString(), new_format); } // Add the formatted column or use it as is sql.append(new_format); } } else { sql.append(payload["group"].GetString()); } } if (payload.HasMember("sort")) { sql.append(" ORDER BY "); const Value& sortBy = payload["sort"]; if (sortBy.IsObject()) { if (! sortBy.HasMember("column")) { raiseError("Select sort", "Missing property \"column\""); return false; } sql.append(sortBy["column"].GetString()); sql.append(' '); if (! sortBy.HasMember("direction")) { sql.append("ASC"); } else { sql.append(sortBy["direction"].GetString()); } } else if (sortBy.IsArray()) { int index = 0; for (Value::ConstValueIterator itr = sortBy.Begin(); itr != sortBy.End(); ++itr) { if (!itr->IsObject()) { raiseError("select sort", "Each element in the sort array must be an object"); return false; } if (! itr->HasMember("column")) { raiseError("Select sort", "Missing property \"column\""); return false; } if (index) { sql.append(", "); } index++; sql.append((*itr)["column"].GetString()); sql.append(' '); if (! itr->HasMember("direction")) { sql.append("ASC"); } else { sql.append((*itr)["direction"].GetString()); } } } } if (payload.HasMember("timebucket")) { const Value& tb = payload["timebucket"]; if (! tb.IsObject()) { raiseError("Select data", "The \"timebucket\" property must be an object"); return false; } if (! 
tb.HasMember("timestamp")) { raiseError("Select data", "The \"timebucket\" object must have a timestamp property"); return false; } if (payload.HasMember("group")) { sql.append(", "); } else { sql.append(" GROUP BY "); } // Divide by "size" if (tb.HasMember("size")) { // Use Unix epoch without milliseconds sql.append("round(strftime('%s', "); sql.append(tb["timestamp"].GetString()); sql.append(") / "); sql.append(tb["size"].GetString()); sql.append(", 6)"); } else { // Use Unix epoch sql.append("strftime('%s', "); sql.append(tb["timestamp"].GetString()); sql.append(")"); } sql.append(" ORDER BY "); // Use Unix epoch without milliseconds sql.append("strftime('%s', "); sql.append(tb["timestamp"].GetString()); sql.append(")"); sql.append(" DESC"); } if (payload.HasMember("limit")) { if (!payload["limit"].IsInt()) { raiseError("limit", "Limit must be specfied as an integer"); return false; } sql.append(" LIMIT "); try { sql.append(payload["limit"].GetInt()); } catch (exception e) { raiseError("limit", "Bad value for limit parameter: %s", e.what()); return false; } } // OFFSET must go after LIMIT if (payload.HasMember("skip")) { // Add no limits if (!payload.HasMember("limit")) { sql.append(" LIMIT -1"); } if (!payload["skip"].IsInt()) { raiseError("skip", "Skip must be specfied as an integer"); return false; } sql.append(" OFFSET "); sql.append(payload["skip"].GetInt()); } return true; } #ifndef SQLITE_SPLIT_READINGS /** * Convert a JSON where clause into a SQLite3 where clause * */ bool Connection::jsonWhereClause( const Value& whereClause, SQLBuffer& sql, std::vector<std::string> &asset_codes, bool convertLocaltime, string prefix) { string column; string cond; if (!whereClause.IsObject()) { raiseError("where clause", "The \"where\" property must be a JSON object"); return false; } if (!whereClause.HasMember("column")) { raiseError("where clause", "The \"where\" object is missing a \"column\" property"); return false; } if (!whereClause.HasMember("condition")) { 
		raiseError("where clause", "The \"where\" object is missing a \"condition\" property");
		return false;
	}
	column = whereClause["column"].GetString();
	// Optional table alias/prefix (used by joined queries)
	if (!prefix.empty())
		sql.append(prefix);
	sql.append(column);
	sql.append(' ');
	cond = whereClause["condition"].GetString();
	if (cond.compare("isnull") == 0)
	{
		sql.append("isnull ");
	}
	else if (cond.compare("notnull") == 0)
	{
		sql.append("notnull ");
	}
	else
	{
		// All remaining conditions require a value property
		if (!whereClause.HasMember("value"))
		{
			raiseError("where clause", "The \"where\" object is missing a \"value\" property");
			return false;
		}
		if (!cond.compare("older"))
		{
			if (!whereClause["value"].IsInt())
			{
				raiseError("where clause", "The \"value\" of an \"older\" condition must be an integer");
				return false;
			}
			sql.append("< datetime('now', '-");
			sql.append(whereClause["value"].GetInt());
			if (convertLocaltime)
				sql.append(" seconds', 'localtime')"); // Get value in localtime
			else
				sql.append(" seconds')"); // Get value in UTC by asking for no timezone
		}
		else if (!cond.compare("newer"))
		{
			if (!whereClause["value"].IsInt())
			{
				raiseError("where clause", "The \"value\" of an \"newer\" condition must be an integer");
				return false;
			}
			sql.append("> datetime('now', '-");
			sql.append(whereClause["value"].GetInt());
			if (convertLocaltime)
				sql.append(" seconds', 'localtime')"); // Get value in localtime
			else
				sql.append(" seconds')"); // Get value in UTC by asking for no timezone
		}
		else if (!cond.compare("in") || !cond.compare("not in"))
		{
			// Check we have a non empty array
			if (whereClause["value"].IsArray() && whereClause["value"].Size())
			{
				sql.append(cond);
				sql.append(" ( ");
				int field = 0;
				for (Value::ConstValueIterator itr = whereClause["value"].Begin(); itr != whereClause["value"].End(); ++itr)
				{
					if (field)
					{
						sql.append(", ");
					}
					field++;
					if (itr->IsNumber())
					{
						if (itr->IsInt())
						{
							sql.append(itr->GetInt());
						}
						else if (itr->IsInt64())
						{
							sql.append((long)itr->GetInt64());
						}
						else
						{
							sql.append(itr->GetDouble());
						}
					}
					else if (itr->IsString())
					{
						// Strings are quoted and escaped before inclusion
						sql.append('\'');
						sql.append(escape(itr->GetString()));
						sql.append('\'');
					}
					else
					{
						string message("The \"value\" of a \"" + \
								cond + \
								"\" condition array element must be " \
								"a string, integer or double.");
						raiseError("where clause", message.c_str());
						return false;
					}
				}
				sql.append(" )");
			}
			else
			{
				string message("The \"value\" of a \"" + \
						cond + "\" condition must be an array " \
						"and must not be empty.");
				raiseError("where clause", message.c_str());
				return false;
			}
		}
		else
		{
			// Generic comparison operator taken from the payload
			// NOTE(review): cond and column are appended unescaped; callers are
			// trusted inputs today but this is an injection vector — confirm
			sql.append(cond);
			sql.append(' ');
			if (whereClause["value"].IsInt())
			{
				sql.append(whereClause["value"].GetInt());
			} else if (whereClause["value"].IsString())
			{
				string value = whereClause["value"].GetString();
				sql.append('\'');
				sql.append(escape(value ));
				sql.append('\'');

				// Identify a specific operation to restrict the tables involved
				if (column.compare("asset_code") == 0)
					if ( cond.compare("=") == 0)
						asset_codes.push_back(value);
			}
		}
	}
	// Recurse for AND / OR sub clauses
	if (whereClause.HasMember("and"))
	{
		sql.append(" AND ");
		if (!jsonWhereClause(whereClause["and"], sql, asset_codes, convertLocaltime, prefix))
		{
			return false;
		}
	}
	if (whereClause.HasMember("or"))
	{
		sql.append(" OR ");
		if (!jsonWhereClause(whereClause["or"], sql, asset_codes, convertLocaltime, prefix))
		{
			return false;
		}
	}

	return true;
}
#endif

/**
 * This routine uses SQLit3 JSON1 extension functions
 */
bool Connection::returnJson(const Value& json, SQLBuffer& sql, SQLBuffer& jsonConstraint)
{
	if (!
json.IsObject()) { raiseError("retrieve", "The json property must be an object"); return false; } if (!json.HasMember("column")) { raiseError("retrieve", "The json property is missing a column property"); return false; } // Call JSON1 SQLite3 extension routine 'json_extract' // json_extract(field, '$.key1.key2') AS value sql.append("json_extract("); sql.append(json["column"].GetString()); sql.append(", '$."); if (!json.HasMember("properties")) { raiseError("retrieve", "The json property is missing a properties property"); return false; } const Value& jsonFields = json["properties"]; if (jsonFields.IsArray()) { if (! jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } // JSON1 SQLite3 extension 'json_type' object check: // json_type(field, '$.key1.key2') IS NOT NULL // Build the Json keys NULL check jsonConstraint.append("json_type("); jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(", '$."); int field = 0; string prev; for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr) { if (field) { sql.append("."); } if (prev.length()) { jsonConstraint.append(prev); jsonConstraint.append("."); } field++; // Append Json field for query sql.append(itr->GetString()); prev = itr->GetString(); } // Add last Json key jsonConstraint.append(prev); // Add condition for all json keys not null jsonConstraint.append("') IS NOT NULL"); } else { // Append Json field for query sql.append(jsonFields.GetString()); if (! 
jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } // JSON1 SQLite3 extension 'json_type' object check: // json_type(field, '$.key1.key2') IS NOT NULL // Build the Json key NULL check jsonConstraint.append("json_type("); jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(", '$."); jsonConstraint.append(jsonFields.GetString()); // Add condition for json key not null jsonConstraint.append("') IS NOT NULL"); } sql.append("') "); return true; } /** * Remove whitespace at both ends of a string */ char *Connection::trim(char *str) { char *ptr; while (*str && *str == ' ') str++; ptr = str + strlen(str) - 1; while (ptr > str && *ptr == ' ') { *ptr = 0; ptr--; } return str; } /** * Raise an error to return from the plugin */ void Connection::raiseError(const char *operation, const char *reason, ...) { ConnectionManager *manager = ConnectionManager::getInstance(); char tmpbuf[512]; va_list ap; va_start(ap, reason); vsnprintf(tmpbuf, sizeof(tmpbuf), reason, ap); va_end(ap); Logger::getLogger()->error("%s storage plugin raising error: %s: %s", PLUGIN_LOG_NAME, operation, tmpbuf); manager->setError(operation, tmpbuf, false); } /** * Return the sie of a given table in bytes */ long Connection::tableSize(const string& table) { SQLBuffer buf; raiseError("tableSize", "Not available in SQLite3 storage plugin"); return -1; } /** * String escape routine */ const string Connection::escape(const string& str) { char *buffer; const char *p1; char *p2; string newString; if (str.find_first_of('\'') == string::npos) { return str; } buffer = (char *)malloc(str.length() * 2); p1 = str.c_str(); p2 = buffer; while (*p1) { if (*p1 == '\'') { *p2++ = '\''; *p2++ = '\''; p1++; } else { *p2++ = *p1++; } } *p2 = 0; newString = string(buffer); free(buffer); return newString; } /** * Optionally log SQL statement execution * * @param tag A string tag that says why the SQL is being executed * @param stmt The SQL statement itself */ void Connection::logSQL(const char *tag, 
const char *stmt) { if (m_logSQL) { Logger::getLogger()->info("%s, %s: %s", PLUGIN_LOG_NAME, tag, stmt); } } #ifndef SQLITE_SPLIT_READINGS /** * SQLITE wrapper to rety statements when the database is locked * * @param db The open SQLite database * @param sql The SQL to execute * @param callback Callback function * @param cbArg Callback 1st argument * @param errmsg Locaiton to write error message */ int Connection::SQLexec(sqlite3 *db, const string& table, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg) { int retries = 0, rc; *errmsg = NULL; do { #if DO_PROFILE ProfileItem *prof = new ProfileItem(sql); #endif if (*errmsg) { sqlite3_free(*errmsg); *errmsg = NULL; } rc = sqlite3_exec(db, sql, callback, cbArg, errmsg); #if DO_PROFILE prof->complete(); profiler.insert(prof); #endif retries++; if (rc != SQLITE_OK) { #if DO_PROFILE_RETRIES m_qMutex.lock(); m_waiting.fetch_add(1); if (maxQueue < m_waiting) maxQueue = m_waiting; m_qMutex.unlock(); #endif int interval = (1 * RETRY_BACKOFF); std::this_thread::sleep_for(std::chrono::milliseconds(interval)); #if DO_PROFILE_RETRIES m_qMutex.lock(); m_waiting.fetch_sub(1); m_qMutex.unlock(); #endif if (sqlite3_get_autocommit(db)==0) // if transaction is still open, do rollback { int rc2; char *zErrMsg = NULL; rc2=SQLexec(db, table, "ROLLBACK TRANSACTION;", NULL, NULL, &zErrMsg); if (rc2 != SQLITE_OK) { raiseError("rollback", zErrMsg); sqlite3_free(zErrMsg); } } } } while (retries < MAX_RETRIES && (rc != SQLITE_OK)); if (retries >= MAX_RETRIES) { Logger::getLogger()->error("SQL statement %s failed after maximum retries", sql, sqlite3_errmsg(dbHandle)); } else if (retries > LOG_AFTER_NERRORS) { Logger::getLogger()->warn("%d retries required of the SQL statement '%s': %s", retries, sql, sqlite3_errmsg(dbHandle)); Logger::getLogger()->warn("If the excessive retries continue for sustained periods it is a sign that the system may be reaching the limits of the load it can handle"); } #if 
DO_PROFILE_RETRIES retryStats[retries-1]++; if (++numStatements > RETRY_REPORT_THRESHOLD - 1) { numStatements = 0; Logger *log = Logger::getLogger(); log->info("Storage layer statement retry profile"); for (int i = 0; i < MAX_RETRIES-1; i++) { log->info("%2d: %d", i, retryStats[i]); retryStats[i] = 0; } log->info("Too many retries: %d", retryStats[MAX_RETRIES-1]); retryStats[MAX_RETRIES-1] = 0; log->info("Maximum retry queue length: %d", maxQueue); maxQueue = 0; } #endif if (rc == SQLITE_LOCKED) { Logger::getLogger()->error("Database still locked after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } if (rc == SQLITE_BUSY) { Logger::getLogger()->error("Database still busy after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } if (rc != SQLITE_OK) { Logger::getLogger()->error("Database error after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } return rc; } #endif /** * Execute a step command on a prepared statement but add the ability to retry on error. * * It is assumed that binding has already taken place and that those bound * vaiables are maintained for all retries. 
* * @param statement The prepared statement to step * @return int The status of the final sqlite3_step that was issued */ int Connection::SQLstep(sqlite3_stmt *statement) { int retries = 0, rc; do { #if DO_PROFILE ProfileItem *prof = new ProfileItem(sqlite3_sql(statement)); #endif if (retries) { sqlite3_reset(statement); } rc = sqlite3_step(statement); #if DO_PROFILE prof->complete(); profiler.insert(prof); #endif retries++; if (rc == SQLITE_LOCKED || rc == SQLITE_BUSY) { int interval = (retries * RETRY_BACKOFF); this_thread::sleep_for(chrono::milliseconds(interval)); } } while (retries < MAX_RETRIES && (rc == SQLITE_LOCKED || rc == SQLITE_BUSY)); if (retries >= MAX_RETRIES) { Logger::getLogger()->error("SQL statement failed after maximum retries", sqlite3_errmsg(dbHandle)); } else if (retries > LOG_AFTER_NERRORS) { Logger::getLogger()->warn("%d retries required of the SQL statement: %s", retries, sqlite3_errmsg(dbHandle)); Logger::getLogger()->warn("If the excessive retries continue for sustained periods it is a sign that the system may be reaching the limits of the load it can handle"); } #if DO_PROFILE_RETRIES retryStats[retries-1]++; if (++numStatements > 1000) { numStatements = 0; Logger *log = Logger::getLogger(); log->info("Storage layer statement retry profile"); for (int i = 0; i < MAX_RETRIES-1; i++) { log->info("%2d: %d", i, retryStats[i]); retryStats[i] = 0; } log->info("Too many retries: %d", retryStats[MAX_RETRIES-1]); retryStats[MAX_RETRIES-1] = 0; } #endif if (rc == SQLITE_LOCKED) { Logger::getLogger()->error("Database still locked after maximum retries"); } if (rc == SQLITE_BUSY) { Logger::getLogger()->error("Database still busy after maximum retries"); } return rc; } #ifndef SQLITE_SPLIT_READINGS /** * Perform a delete against a common table * */ int Connection::deleteRows(const string& schema, const string& table, const string& condition) { // Default template parameter uses UTF8 and MemoryPoolAllocator. 
Document document; SQLBuffer sql; vector<string> asset_codes; if (!m_schemaManager->exists(dbHandle, schema)) { raiseError("delete", "Schema %s does not exist, unable to delete from table %s", schema.c_str(), table.c_str()); return false; } sql.append("DELETE FROM "); sql.append(schema); sql.append('.'); sql.append(table); if (! condition.empty()) { sql.append(" WHERE "); if (document.Parse(condition.c_str()).HasParseError()) { raiseError("delete", "Failed to parse JSON payload"); return -1; } else { if (document.HasMember("where")) { if (!jsonWhereClause(document["where"], sql, asset_codes)) { return -1; } } else { raiseError("delete", "JSON does not contain where clause"); return -1; } } } sql.append(';'); const char *query = sql.coalesce(); logSQL("CommonDelete", query); char *zErrMsg = NULL; int delete_rows; int rc; // Exec the DELETE statement: no callback, no result set m_writeAccessOngoing.fetch_add(1); rc = SQLexec(dbHandle, table, query, NULL, NULL, &zErrMsg); m_writeAccessOngoing.fetch_sub(1); if (m_writeAccessOngoing == 0) db_cv.notify_all(); // Check result code if (rc == SQLITE_OK) { // Success. Release memory for 'query' var delete[] query; return sqlite3_changes(dbHandle); } else { raiseError("delete", zErrMsg); sqlite3_free(zErrMsg); Logger::getLogger()->error("SQL statement: %s", query); delete[] query; // Failure return -1; } } #endif #ifndef SQLITE_SPLIT_READINGS /** * Create snapshot of a common table * * @param table The table to snapshot * @param id The snapshot id * @return -1 on error, >= 0 on success * * The new created table name has the name: * $table_snap$id */ int Connection::create_table_snapshot(const string& table, const string& id) { string query = "CREATE TABLE fledge."; query += table + "_snap" + id + " AS SELECT * FROM fledge." 
+ table; logSQL("CreateTableSnapshot", query.c_str()); char* zErrMsg = NULL; int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, &zErrMsg); // Check result code if (rc == SQLITE_OK) { return 1; } else { raiseError("create_table_snapshot", zErrMsg); sqlite3_free(zErrMsg); return -1; } } /** * Set the contents of a common table from a snapshot * * @param table The table to fill * @param id The snapshot id of the table * @return -1 on error, >= 0 on success * */ int Connection::load_table_snapshot(const string& table, const string& id) { string purgeQuery = "DELETE FROM fledge." + table; string query = "BEGIN TRANSACTION; "; query += purgeQuery +"; INSERT INTO fledge." + table; query += " SELECT * FROM fledge." + table + "_snap" + id; query += "; COMMIT TRANSACTION;"; logSQL("LoadTableSnapshot", query.c_str()); char* zErrMsg = NULL; int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, &zErrMsg); // Check result code if (rc == SQLITE_OK) { return 1; } else { raiseError("load_table_snapshot", zErrMsg); sqlite3_free(zErrMsg); // transaction is still open, do rollback if (sqlite3_get_autocommit(dbHandle) == 0) { rc = SQLexec(dbHandle, table, "ROLLBACK TRANSACTION;", NULL, NULL, &zErrMsg); if (rc != SQLITE_OK) { raiseError("rollback for load_table_snapshot", zErrMsg); sqlite3_free(zErrMsg); } } return -1; } } /** * Delete a snapshot of a common table * * @param table The table to snapshot * @param id The snapshot id * @return -1 on error, >= 0 on success * */ int Connection::delete_table_snapshot(const string& table, const string& id) { string query = "DROP TABLE fledge." 
+ table + "_snap" + id; logSQL("DeleteTableSnapshot", query.c_str()); char* zErrMsg = NULL; int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, &zErrMsg); // Check result code if (rc == SQLITE_OK) { return 1; } else { raiseError("delete_table_snapshot", zErrMsg); sqlite3_free(zErrMsg); return -1; } } /** * Get list of snapshots for a given common table * * @param table The given table name */ bool Connection::get_table_snapshots(const string& table, string& resultSet) { SQLBuffer sql; try { if (dbHandle == NULL) { raiseError("retrieve", "No SQLite 3 db connection available"); return false; } sql.append("SELECT REPLACE(name, '"); sql.append(table); sql.append("_snap', '') AS id FROM sqlite_master WHERE type='table' AND name LIKE '"); sql.append(table); sql.append("_snap%';"); const char *query = sql.coalesce(); char *zErrMsg = NULL; int rc; sqlite3_stmt *stmt = NULL; logSQL("GetTableSnapshots", query); // Prepare the SQL statement and get the result set rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); if (rc != SQLITE_OK) { raiseError("get_table_snapshots", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); if (stmt) sqlite3_finalize(stmt); delete[] query; return false; } // Call result set mapping rc = mapResultSet(stmt, resultSet); // Delete result set sqlite3_finalize(stmt); // Check result set mapping errors if (rc != SQLITE_DONE) { raiseError("get_table_snapshots", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); delete[] query; // Failure return false; } // Release memory for 'query' var delete[] query; // Success return true; } catch (exception e) { raiseError("get_table_snapshots", "Internal error: %s", e.what()); // Failure return false; } } /** * In the case of a join add the columns to select from for all the tables in * the join * * @param document The query we are processing * @param sql The SQLBuffer we are writing * @param level The table number we are processing */ 
bool Connection::selectColumns(const Value& document, SQLBuffer& sql, int level) { SQLBuffer jsonConstraints; string tag = "t" + to_string(level) + "."; if (document.HasMember("return")) { int col = 0; const Value& columns = document["return"]; if (! columns.IsArray()) { raiseError("retrieve", "The property return must be an array"); return false; } if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr) { if (col) sql.append(", "); if (!itr->IsObject()) // Simple column name { sql.append(tag); sql.append(itr->GetString()); } else { if (itr->HasMember("column")) { if (! (*itr)["column"].IsString()) { raiseError("rerieve", "column must be a string"); return false; } if (itr->HasMember("format")) { if (! (*itr)["format"].IsString()) { raiseError("rerieve", "format must be a string"); return false; } // SQLite 3 date format. string new_format; applyColumnDateFormat((*itr)["format"].GetString(), tag + (*itr)["column"].GetString(), new_format, true); // Add the formatted column or use it as is sql.append(new_format); } else if (itr->HasMember("timezone")) { if (! (*itr)["timezone"].IsString()) { raiseError("rerieve", "timezone must be a string"); return false; } // SQLite3 doesnt support time zone formatting if (strcasecmp((*itr)["timezone"].GetString(), "utc") != 0) { raiseError("retrieve", "SQLite3 plugin does not support timezones in qeueries"); return false; } else { sql.append("strftime('" F_DATEH24_MS "', "); sql.append(tag); sql.append((*itr)["column"].GetString()); sql.append(", 'utc')"); } } else { sql.append(tag); sql.append((*itr)["column"].GetString()); } sql.append(' '); } else if (itr->HasMember("json")) { const Value& json = (*itr)["json"]; if (! 
returnJson(json, sql, jsonConstraints)) return false; } else { raiseError("retrieve", "return object must have either a column or json property"); return false; } if (itr->HasMember("alias")) { sql.append(" AS \""); sql.append((*itr)["alias"].GetString()); sql.append('"'); } } col++; } } else { sql.append('*'); return true; } if (document.HasMember("join")) { const Value& join = document["join"]; if (join.HasMember("query")) { const Value& query = join["query"]; sql.append(", "); if (!selectColumns(query, sql, ++level)) { raiseError("commonRetrieve", "Join failed to add select columns"); return false; } } } return true; } /** * In the case of a join add the tables to select from for all the tables in * the join * * @param schema The schema we are using * @param document The query we are processing * @param sql The SQLBuffer we are writing * @param level The table number we are processing */ bool Connection::appendTables(const string& schema, const Value& document, SQLBuffer& sql, int level) { string tag = "t" + to_string(level); if (document.HasMember("join")) { const Value& join = document["join"]; if (join.HasMember("table")) { const Value& table = join["table"]; if (!table.HasMember("name")) { raiseError("commonRetrieve", "Joining table is missing a table name"); return false; } const Value& name = table["name"]; if (!name.IsString()) { raiseError("commonRetrieve", "Joining table name is not a string"); return false; } sql.append(", "); sql.append(schema); sql.append('.'); sql.append(name.GetString()); sql.append(" "); sql.append(tag); if (join.HasMember("query")) { const Value& query = join["query"]; appendTables(schema, query, sql, ++level); } else { raiseError("commonRetrieve", "Join is missing a join query definition"); return false; } } else { raiseError("commonRetrieve", "Join is missing a table definition"); return false; } } return true; } /** * Recurse down and add the where cluase and join terms for each * new table joined to the query * * @param query 
The JSON query * @param sql The SQLBuffer we are writing the data to * @param asset_codes The asset codes * @param level The nestign level of the joined table */ bool Connection::processJoinQueryWhereClause(const Value& query, SQLBuffer& sql, std::vector<std::string> &asset_codes, int level) { string tag = "t" + to_string(level) + "."; if (!jsonWhereClause(query["where"], sql, asset_codes, true, tag)) { return false; } if (query.HasMember("join")) { // Now and the join condition itself string col0, col1; const Value& join = query["join"]; if (join.HasMember("on") && join["on"].IsString()) { col0 = join["on"].GetString(); } else { return false; } if (join.HasMember("table")) { const Value& table = join["table"]; if (table.HasMember("column") && table["column"].IsString()) { col1 = table["column"].GetString(); } else { raiseError("Joined query", "Missing join column in table"); return false; } } sql.append(" AND "); sql.append(tag); sql.append(col0); sql.append(" = t"); sql.append(level + 1); sql.append("."); sql.append(col1); sql.append(" "); if (join.HasMember("query") && join["query"].IsObject()) { sql.append(" AND "); const Value& query = join["query"]; processJoinQueryWhereClause(query, sql, asset_codes, level + 1); } } return true; } /** * Create schema and populate with tables and indexes as defined in the JSON schema * definition. 
* * @param schema The schema defintion as a JSON document containing information about schema of tables to create * @return true if the schema was created */ bool Connection::createSchema(const std::string &schema) { return m_schemaManager->create(dbHandle, schema); } /** * Execute a SQLite VACUUM command on the database */ bool Connection::vacuum() { char* zErrMsg = NULL; // Exec the statement int rc = SQLexec(dbHandle, "", "VACUUM;", NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) { const char* errMsg = "Failed to vacuum database "; Logger::getLogger()->error("%s: error %s", errMsg, zErrMsg); sqlite3_free(zErrMsg); return false; } else { Logger::getLogger()->info("Database vacuum complete"); } return true; } #endif /** * Return the first word in a SQL statement, ie the operation that is beign executed. * * @param sql The complete SQL statement * @return string The operation */ string Connection::operation(const char *sql) { const char *p1 = sql; char buf[40], *p2 = buf; while (*p1 && !isspace(*p1) && p2 - buf < 40) *p2++ = *p1++; *p2 = '\0'; return string(buf); } ================================================ FILE: C/plugins/storage/sqlite/common/connection_manager.cpp ================================================ /* * Fledge storage service. 
* * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <sqlite3.h> #include <unistd.h> #include <sys/time.h> #include <sys/resource.h> #include <connection_manager.h> #include <connection.h> #include <readings_catalogue.h> #include <logger.h> #include <disk_monitor.h> #include <utils.h> #include <sqlite_common.h> #define PRAGMA_SMALL "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -2000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 2048000;" #define PRAGMA_NORMAL "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -4000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 4096000;" #define PRAGMA_HISPEED "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -8000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 81920000; PRAGMA temp_store = MEMORY" ConnectionManager *ConnectionManager::instance = 0; /** * Background thread entry point */ static void managerBackground(void *arg) { ConnectionManager *mgr = (ConnectionManager *)arg; mgr->background(); } /** * Default constructor for the connection manager. 
*/ ConnectionManager::ConnectionManager() : m_shutdown(false), m_vacuumInterval(6 * 60 * 60), m_attachedDatabases(0), m_diskSpaceMonitor(0), m_config(0) { lastError.message = NULL; lastError.entryPoint = NULL; if (getenv("FLEDGE_TRACE_SQL")) m_trace = true; else m_trace = false; std::string dbPath, dbPathReadings; const char *defaultConnection = getenv("DEFAULT_SQLITE_DB_FILE"); const char *defaultReadingsConnection = getenv("DEFAULT_SQLITE_DB_READINGS_FILE"); if (defaultConnection == NULL) { // Set DB base path dbPath = getDataDir(); // Add the filename dbPath += _DB_NAME; } else { dbPath = defaultConnection; } if (defaultReadingsConnection == NULL) { // Set DB base path dbPathReadings = getDataDir(); // Add the filename dbPathReadings += READINGS_DB_FILE_NAME; } else { dbPathReadings = defaultReadingsConnection; } m_diskSpaceMonitor = new DiskSpaceMonitor(dbPath, dbPathReadings); m_background = new std::thread(managerBackground, this); struct rlimit lim; getrlimit(RLIMIT_NOFILE, &lim); m_descriptorLimit = lim.rlim_cur; } /** * Called at shutdown. Shrink the idle pool, this will * have the side effect of closing the connections to the database. */ void ConnectionManager::shutdown() { m_shutdown = true; shrinkPool(idle.size()); if (m_background) m_background->join(); if (m_diskSpaceMonitor) { delete m_diskSpaceMonitor; m_diskSpaceMonitor = NULL; } } /** * Return the singleton instance of the connection manager. * if none was created then create it. */ ConnectionManager *ConnectionManager::getInstance() { if (instance == 0) { instance = new ConnectionManager(); } return instance; } /** * Grow the connection pool by the number of connections * specified. 
* * @param delta The number of connections to add to the pool */ void ConnectionManager::growPool(unsigned int delta) { int poolSize = idle.size() + inUse.size(); if ((delta + poolSize) * m_attachedDatabases * NO_DESCRIPTORS_PER_DB > (DESCRIPTOR_THRESHOLD * m_descriptorLimit) / 100) { Logger::getLogger()->warn("Request to grow database connection pool rejected" " due to excessive file descriptor usage"); return; } int failures = 0; while (delta-- > 0) { try { Connection *conn = new Connection(this); if (m_trace) conn->setTrace(true); idleLock.lock(); idle.push_back(conn); idleLock.unlock(); } catch (...) { failures++; } } if (failures > 0) { idleLock.lock(); int idleCount = idle.size(); idleLock.unlock(); inUseLock.lock(); int inUseCount = inUse.size(); inUseLock.unlock(); Logger::getLogger()->warn("Connection pool growth restricted due to failure to create %d connections, %d idle connections & %d connection in use currently", failures, idleCount, inUseCount); noConnectionsDiagnostic(); } } /** * Attempt to shrink the number of connections in the idle pool * * @param delta Number of connections to attempt to remove * @return The number of connections removed. */ unsigned int ConnectionManager::shrinkPool(unsigned int delta) { unsigned int removed = 0; Connection *conn; while (delta-- > 0) { idleLock.lock(); conn = idle.back(); idle.pop_back(); idleLock.unlock(); if (conn) { delete conn; removed++; } else { break; } } return removed; } /** * Allocate a connection from the idle pool. If * no connection is available add a new connection */ Connection *ConnectionManager::allocate() { Connection *conn = 0; idleLock.lock(); if (idle.empty()) { try { conn = new Connection(this); } catch (...) 
{ conn = NULL; Logger::getLogger()->error("Failed to create database connection to allocate"); noConnectionsDiagnostic(); } } else { conn = idle.front(); idle.pop_front(); } idleLock.unlock(); if (conn) { inUseLock.lock(); inUse.push_front(conn); inUseLock.unlock(); } return conn; } /** * Attach a database to all the connections, idle and inuse * * @param path - path of the database to attach * @param alias - alias to be assigned to the attached database */ bool ConnectionManager::attachNewDb(std::string &path, std::string &alias) { int rc; std::string sqlCmd; sqlite3 *dbHandle; bool result; char *zErrMsg = NULL; int poolSize = idle.size() + inUse.size(); if (poolSize * m_attachedDatabases * NO_DESCRIPTORS_PER_DB > (DESCRIPTOR_THRESHOLD * m_descriptorLimit) / 100) { Logger::getLogger()->warn("Request to attach new database rejected" " due to excessive file descriptor usage"); return false; } result = true; sqlCmd = "ATTACH DATABASE '" + path + "' AS " + alias + ";"; idleLock.lock(); inUseLock.lock(); // attach the DB to all idle connections for (auto conn : idle) { dbHandle = conn->getDbHandle(); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { Logger::getLogger()->error("attachNewDb - It was not possible to attach the db :%s: to an idle connection, error :%s:", path.c_str(), zErrMsg); sqlite3_free(zErrMsg); result = false; // TODO We are potentially left in an inconsistant state with the new database // attached to some connections but not all. 
break; } Logger::getLogger()->debug("attachNewDb idle dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); } if (result) { // attach the DB to all inUse connections for (auto conn : inUse) { dbHandle = conn->getDbHandle(); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { Logger::getLogger()->error("attachNewDb - It was not possible to attach the db :%s: to an inUse connection, error :%s:", path.c_str() ,zErrMsg); sqlite3_free(zErrMsg); result = false; // TODO We are potentially left in an inconsistant state with the new // database attached to some connections but not all. break; } Logger::getLogger()->debug("attachNewDb inUse dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); } } m_attachedDatabases++; inUseLock.unlock(); idleLock.unlock(); return (result); } /** * Detach a database from all the connections * */ bool ConnectionManager::detachNewDb(std::string &alias) { int rc; std::string sqlCmd; sqlite3 *dbHandle; bool result; char *zErrMsg = NULL; result = true; sqlCmd = "DETACH DATABASE " + alias + ";"; Logger::getLogger()->debug("detachDb - db alias :%s: cmd :%s:" , alias.c_str() , sqlCmd.c_str() ); idleLock.lock(); inUseLock.lock(); // attach the DB to all idle connections for (auto conn : idle) { dbHandle = conn->getDbHandle(); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { Logger::getLogger()->error("detachNewDb - It was not possible to detach the db :%s: from an idle connection, error :%s:", alias.c_str(), zErrMsg); sqlite3_free(zErrMsg); result = false; break; } Logger::getLogger()->debug("detachNewDb - idle dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); } if (result) { // attach the DB to all inUse connections for (auto conn : inUse) { dbHandle = conn->getDbHandle(); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { Logger::getLogger()->error("detachNewDb - It was not possible to detach the db :%s: from an inUse connection, error :%s:", alias.c_str() ,zErrMsg); 
sqlite3_free(zErrMsg); result = false; break; } Logger::getLogger()->debug("detachNewDb - inUse dbHandle :%X: sqlCmd :%s: ", dbHandle, sqlCmd.c_str()); } } m_attachedDatabases--; inUseLock.unlock(); idleLock.unlock(); return (result); } /** * Adds to all the connections a request to attach a database * * * * @param newDbId - database id to attach * @param dbHandle - dbhandle for which the attach request should NOT be added * */ bool ConnectionManager::attachRequestNewDb(int newDbId, sqlite3 *dbHandle) { int rc; std::string sqlCmd; bool result; char *zErrMsg = NULL; int poolSize = idle.size() + inUse.size(); if (poolSize * m_attachedDatabases * NO_DESCRIPTORS_PER_DB > (DESCRIPTOR_THRESHOLD * m_descriptorLimit) / 100) { Logger::getLogger()->warn("Request to attach nwe database rejected" " due to excessive file descriptor usage"); return false; } result = true; idleLock.lock(); inUseLock.lock(); // attach the DB to all idle connections for (auto conn : idle) { if (dbHandle == conn->getDbHandle()) { Logger::getLogger()->debug("attachRequestNewDb - idle skipped dbHandle :%X: sqlCmd :%s: ", conn->getDbHandle(), sqlCmd.c_str()); } else { conn->setUsedDbId(newDbId); Logger::getLogger()->debug("attachRequestNewDb - idle, dbHandle :%X: sqlCmd :%s: ", conn->getDbHandle(), sqlCmd.c_str()); } } if (result) { // attach the DB to all inUse connections { for (auto conn : inUse) { if (dbHandle == conn->getDbHandle()) { Logger::getLogger()->debug("attachRequestNewDb - inUse skipped dbHandle :%X: sqlCmd :%s: ", conn->getDbHandle(), sqlCmd.c_str()); } else { conn->setUsedDbId(newDbId); Logger::getLogger()->debug("attachRequestNewDb - inUse, dbHandle :%X: sqlCmd :%s: ", conn->getDbHandle(), sqlCmd.c_str()); } } } } m_attachedDatabases++; inUseLock.unlock(); idleLock.unlock(); return (result); } /** * Release a connection back to the idle pool for * reallocation. * * @param conn The connection to release. 
*/ void ConnectionManager::release(Connection *conn) { #if TRACK_CONNECTION_USER conn->clearUsage(); #endif inUseLock.lock(); inUse.remove(conn); inUseLock.unlock(); idleLock.lock(); idle.push_back(conn); idleLock.unlock(); } /** * Set the last error information for a plugin. * * @param source The source of the error * @param description The error description * @param retryable Flag to determien if the error condition is transient */ void ConnectionManager::setError(const char *source, const char *description, bool retryable) { errorLock.lock(); if (lastError.entryPoint) free(lastError.entryPoint); if (lastError.message) free(lastError.message); lastError.retryable = retryable; lastError.entryPoint = strdup(source); lastError.message = strdup(description); errorLock.unlock(); } /** * SQLIte wrapper to retry statements when the database is locked * */ int ConnectionManager::SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **errMsg) { int retries = 0, rc; do { if (errMsg == NULL) { rc = sqlite3_exec(dbHandle, sqlCmd, NULL, NULL, NULL); } else { if (*errMsg) { sqlite3_free(*errMsg); *errMsg = NULL; } rc = sqlite3_exec(dbHandle, sqlCmd, NULL, NULL, errMsg); Logger::getLogger()->debug("SQLExec: rc :%d: ", rc); } if (rc != SQLITE_OK) { int interval = (retries * RETRY_BACKOFF); usleep(interval); // sleep retries milliseconds if (retries > 5) { Logger::getLogger()->warn("ConnectionManager::SQLExec - error :%s: dbHandle :%X: sqlCmd :%s: retry :%d: of :%d:", sqlite3_errmsg(dbHandle), dbHandle, sqlCmd, rc, MAX_RETRIES); } retries++; } } while (retries < MAX_RETRIES && (rc != SQLITE_OK)); if (rc == SQLITE_LOCKED) { Logger::getLogger()->error("ConnectionManager::SQLExec - Database still locked after maximum retries"); } if (rc == SQLITE_BUSY) { Logger::getLogger()->error("ConnectionManager::SQLExec - Database still busy after maximum retries"); } return rc; } /** * Background thread used to execute periodic tasks and oversee the database activity. 
* * We will run the SQLite vacuum command periodically to allow space to be reclaimed */ void ConnectionManager::background() { time_t nextVacuum = time(0) + m_vacuumInterval; while (!m_shutdown) { if (m_diskSpaceMonitor) m_diskSpaceMonitor->periodic(15); // Called with the interval we sleep for sleep(15); time_t tim = time(0); if (m_vacuumInterval && tim > nextVacuum) { Connection *con = allocate(); con->vacuum(); release(con); nextVacuum = time(0) + m_vacuumInterval; } } } /** * Determine if we can allow another database to be created and attached to all the * connections. * * @return True if we can create another database. */ bool ConnectionManager::allowMoreDatabases() { // Allow for a couple of user defined schemas as well as the fledge database if (m_attachedDatabases + 4 > ReadingsCatalogue::getInstance()->getMaxAttached()) { return false; } int poolSize = idle.size() + inUse.size(); if (poolSize * (m_attachedDatabases + 1) * NO_DESCRIPTORS_PER_DB > (DESCRIPTOR_THRESHOLD * m_descriptorLimit) / 100) { return false; } return true; } void ConnectionManager::noConnectionsDiagnostic() { #if TRACK_CONNECTION_USER Logger *logger = Logger::getLogger(); inUseLock.lock(); logger->warn("There are %d connections in use currently", inUse.size()); for (auto conn : inUse) { logger->warn(" Connection is use by %s", conn->getUsage().c_str()); } inUseLock.unlock(); #endif } /** * Return the pragma configuration for the database */ string ConnectionManager::getDBConfiguration() { if (m_config && m_config->itemExists("deployment")) { string mode = m_config->getValue("deployment"); if (mode.compare("Small") == 0) return PRAGMA_SMALL; if (mode.compare("High Bandwidth") == 0) return PRAGMA_HISPEED; } return PRAGMA_NORMAL; } ================================================ FILE: C/plugins/storage/sqlite/common/include/connection.h ================================================ #ifndef _CONNECTION_H #define _CONNECTION_H /* * Fledge storage service. 
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <sql_buffer.h> #include <string> #include <rapidjson/document.h> #include <sqlite3.h> #include <mutex> #include <reading_stream.h> #include <schema.h> #include <map> #include <vector> #include <atomic> class ConnectionManager; #define TRACK_CONNECTION_USER 0 // Set to 1 to get dianositcs about connection pool use #define READINGS_DB_FILE_NAME "/" READINGS_DB_NAME_BASE "_1.db" #define READINGS_DB READINGS_DB_NAME_BASE "_1" #define READINGS_TABLE "readings" #define READINGS_TABLE_MEM READINGS_TABLE "_1" // Set plugin name for log messages #ifndef PLUGIN_LOG_NAME #define PLUGIN_LOG_NAME "SQLite3" #endif // Retry mechanism #define PREP_CMD_MAX_RETRIES 50 // Maximum no. of retries when a lock is encountered #define PREP_CMD_RETRY_BASE 50 // Base time to wait for #define PREP_CMD_RETRY_BACKOFF 50 // Variable time to wait for #define MAX_RETRIES 40 // Maximum no. of retries when a lock is encountered #define RETRY_BACKOFF 50 // Multipler to backoff DB retry on lock /* * Control the way purge deletes readings. The block size sets a limit as to how many rows * get deleted in each call, whilst the sleep interval controls how long the thread sleeps * between deletes. The idea is to not keep the database locked too long and allow other threads * to have access to the database between blocks. */ #define PURGE_SLEEP_MS 500 #define PURGE_DELETE_BLOCK_SIZE 10000 #define MIN_PURGE_DELETE_BLOCK_SIZE 1000 #define MAX_PURGE_DELETE_BLOCK_SIZE 10000 #define TARGET_PURGE_BLOCK_DEL_TIME (70*1000) // 70 msec #define PURGE_BLOCK_SZ_GRANULARITY 5 // 5 rows #define RECALC_PURGE_BLOCK_SIZE_NUM_BLOCKS 30 // recalculate purge block size after every 30 blocks #define PURGE_SLOWDOWN_AFTER_BLOCKS 5 #define PURGE_SLOWDOWN_SLEEP_MS 500 #define SECONDS_PER_DAY "86400.0" // 2440587.5 is the julian day at 1/1/1970 0:00 UTC. 
#define JULIAN_DAY_START_UNIXTIME "2440587.5" #define START_TIME std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); #define END_TIME std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); \ auto usecs = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); int dateCallback(void *data, int nCols, char **colValues, char **colNames); bool applyColumnDateFormat(const std::string& inFormat, const std::string& colName, std::string& outFormat, bool roundMs = false); bool applyColumnDateFormatLocaltime(const std::string& inFormat, const std::string& colName, std::string& outFormat, bool roundMs = false); int rowidCallback(void *data, int nCols, char **colValues, char **colNames); int selectCallback(void *data, int nCols, char **colValues, char **colNames); int countCallback(void *data, int nCols, char **colValues, char **colNames); bool applyDateFormat(const std::string& inFormat, std::string& outFormat); class Connection { public: Connection(ConnectionManager *manager); ~Connection(); #ifndef SQLITE_SPLIT_READINGS bool createSchema(const std::string& schema); bool retrieve(const std::string& schema, const std::string& table, const std::string& condition, std::string& resultSet); int insert(const std::string& schema, const std::string& table, const std::string& data); int update(const std::string& schema, const std::string& table, const std::string& data); int deleteRows(const std::string& schema, const std::string& table, const std::string& condition); int create_table_snapshot(const std::string& table, const std::string& id); int load_table_snapshot(const std::string& table, const std::string& id); int delete_table_snapshot(const std::string& table, const std::string& id); bool get_table_snapshots(const std::string& table, std::string& resultSet); #endif int appendReadings(const char *readings); int readingStream(ReadingStream **readings, bool commit); bool 
fetchReadings(unsigned long id, unsigned int blksize, std::string& resultSet); bool retrieveReadings(const std::string& condition, std::string& resultSet); unsigned int purgeReadings(unsigned long age, unsigned int flags, unsigned long sent, std::string& results); unsigned int purgeReadingsByRows(unsigned long rowcount, unsigned int flags, unsigned long sent, std::string& results); long tableSize(const std::string& table); void setTrace(bool); bool formatDate(char *formatted_date, size_t formatted_date_size, const char *date); bool aggregateQuery(const rapidjson::Value& payload, std::string& resultSet); bool getNow(std::string& Now); sqlite3 *getDbHandle() {return dbHandle;}; void setUsedDbId(int dbId); void shutdownAppendReadings(); unsigned int purgeReadingsAsset(const std::string& asset); bool vacuum(); bool supportsReadings() { return ! m_noReadings; }; #if TRACK_CONNECTION_USER void setUsage(std::string usage) { m_usage = usage; }; void clearUsage() { m_usage = ""; }; std::string getUsage() { return m_usage; }; #endif private: std::string operation(const char *sql); std::vector<int> m_NewDbIdList; // Newly created databases that should be attached bool m_streamOpenTransaction; int m_queuing; std::mutex m_qMutex; int SQLPrepare(sqlite3 *dbHandle, const char *sqlCmd, sqlite3_stmt **readingsStmt); int SQLexec(sqlite3 *db, const std::string& table, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg); int SQLstep(sqlite3_stmt *statement); bool m_logSQL; void raiseError(const char *operation, const char *reason,...); sqlite3 *dbHandle; SchemaManager *m_schemaManager; int mapResultSet(void *res, std::string& resultSet, unsigned long *rowsCount = nullptr); #ifndef SQLITE_SPLIT_READINGS bool jsonWhereClause(const rapidjson::Value& whereClause, SQLBuffer&, std::vector<std::string> &asset_codes, bool convertLocaltime = false, std::string prefix = ""); #else bool jsonWhereClause(const rapidjson::Value& whereClause, SQLBuffer&, bool 
convertLocaltime = false, std::string prefix = ""); #endif bool jsonModifiers(const rapidjson::Value&, SQLBuffer&, bool isTableReading = false); #ifndef SQLITE_SPLIT_READINGS bool jsonAggregates(const rapidjson::Value&, const rapidjson::Value&, SQLBuffer&, SQLBuffer&, bool &isOptAggregate, bool isTableReading = false, bool isExtQuery = false ); #else bool jsonAggregates(const rapidjson::Value&, const rapidjson::Value&, SQLBuffer&, SQLBuffer&, bool isTableReading = false); #endif bool returnJson(const rapidjson::Value&, SQLBuffer&, SQLBuffer&); char *trim(char *str); const std::string escape(const std::string&); bool applyColumnDateTimeFormat(sqlite3_stmt *pStmt, int i, std::string& newDate); void logSQL(const char *, const char *); bool selectColumns(const rapidjson::Value& document, SQLBuffer& sql, int level); bool appendTables(const std::string& schema, const rapidjson::Value& document, SQLBuffer& sql, int level); bool processJoinQueryWhereClause(const rapidjson::Value& query, SQLBuffer& sql, std::vector<std::string> &asset_codes, int level); bool m_noReadings; #if TRACK_CONNECTION_USER std::string m_usage; #endif ConnectionManager *m_manager; }; #endif ================================================ FILE: C/plugins/storage/sqlite/common/include/connection_manager.h ================================================ #ifndef _CONNECTION_MANAGER_H #define _CONNECTION_MANAGER_H /* * Fledge storage service. 
* * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <sqlite3.h> #include <plugin_api.h> #include <list> #include <mutex> #include <thread> #include <config_category.h> #define NO_DESCRIPTORS_PER_DB 3 // 3 deascriptors per database when using WAL mode #define DESCRIPTOR_THRESHOLD 75 // Percentage of descriptors that can be used on database connections class Connection; class DiskSpaceMonitor; /** * Singleton class to manage SQLite3 connection pool */ class ConnectionManager { public: static ConnectionManager *getInstance(); void growPool(unsigned int); unsigned int shrinkPool(unsigned int); Connection *allocate(); bool attachNewDb(std::string &path, std::string &alias); bool attachRequestNewDb(int newDbId, sqlite3 *dbHandle); bool detachNewDb(std::string &alias); void release(Connection *); void shutdown(); void setError(const char *, const char *, bool); PLUGIN_ERROR *getError() { return &lastError; } void background(); void setVacuumInterval(long hours) { m_vacuumInterval = 60 * 60 * hours; }; bool allowMoreDatabases(); void setConfiguration(ConfigCategory *category) { m_config = category; }; std::string getDBConfiguration(); protected: ConnectionManager(); private: static ConnectionManager *instance; int SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **errMsg); void noConnectionsDiagnostic(); protected: std::list<Connection *> idle; std::list<Connection *> inUse; std::mutex idleLock; std::mutex inUseLock; std::mutex errorLock; PLUGIN_ERROR lastError; bool m_trace; bool m_shutdown; std::thread *m_background; long m_vacuumInterval; unsigned int m_descriptorLimit; unsigned int m_attachedDatabases; DiskSpaceMonitor *m_diskSpaceMonitor; ConfigCategory *m_config; }; #endif ================================================ FILE: C/plugins/storage/sqlite/common/include/purge_configuration.h ================================================ #ifndef _PURGE_CONFIGURATION_H #define _PURGE_CONFIGURATION_H /* * 
Fledge storage service - Purge configuration * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <vector> #include <cstdint> class PurgeConfiguration { public: static PurgeConfiguration *getInstance(); void exclude(const std::string& asset); bool hasExclusions() { return m_exclude.size() != 0; }; bool isExcluded(const std::string& asset); void minimumRetained(uint32_t minimum); uint32_t getMinimumRetained() { return m_minimum; }; private: PurgeConfiguration(); ~PurgeConfiguration(); private: static PurgeConfiguration *m_instance; std::vector<std::string> m_exclude; uint32_t m_minimum; }; #endif ================================================ FILE: C/plugins/storage/sqlite/common/include/readings_catalogue.h ================================================ #ifndef _READINGS_CATALOGUE_H #define _READINGS_CATALOGUE_H /* * Fledge storage service - Readings catalogue handling * * Copyright (c) 2020 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli, Massimiliano Pinto */ #include "connection.h" #include <thread> #define OVERFLOW_TABLE_ID 0 // Table ID to use for the overflow table /** * This class handles per thread started transaction boundaries: */ class TransactionBoundary { public: TransactionBoundary() {}; unsigned long GetMinReadingId(); void SetThreadTransactionStart(std::thread::id tid, unsigned long id); void ClearThreadTransaction(std::thread::id); private: std::map<std::thread::id, unsigned long> m_boundaries; std::mutex m_boundaryLock; }; /** * - poolSize = Number of connections to allocate * - nReadingsPerDb = Number of readings tables per database * - nDbPreallocate = Number of databases to allocate in advance * - nDbLeftFreeBeforeAllocate = Number of free databases before a new allocation is executed * - nDbToAllocate = Number of database to allocate each time * */ typedef struct { int poolSize = 5; int nReadingsPerDb = 14; int 
nDbPreallocate = 3; int nDbLeftFreeBeforeAllocate = 1; int nDbToAllocate = 2; } STORAGE_CONFIGURATION; /** * Class used to store table references */ class TableReference { public: TableReference(int dbId, int tableId) : m_dbId(dbId), m_tableId(tableId) { m_issued = time(0); }; time_t lastIssued() { return m_issued; }; int getTable() { return m_tableId; }; int getDatabase() { return m_dbId; }; void issue() { m_issued = time(0); }; private: int m_dbId; int m_tableId; time_t m_issued; }; /** * Implements the handling of multiples readings tables stored among multiple SQLite databases. * * The databases are named using the format readings_<dbid>, like for example readings_1.db * and each database contains multiples readings named as readings_<dbid>_<id> like readings_1_1, readings_1_2 * * The table asset_reading_catalogue is used as a catalogue in order map a particular asset_code * to a table that holds readings for that asset_code. * * readings_1.asset_reading_catalogue: * - table_id INTEGER NOT NULL, * - db_id INTEGER NOT NULL, * - asset_code character varying(50) NOT NULL * * The first reading table readings_1_1 is created by the script init_readings.sql executed during the storage init * all the other readings tables are created by the code when Fledge starts. * * The table configuration_readings created by the script init_readings.sql keeps track of the information: * * - global_id -- Stores the last global Id used +1, Updated at -1 when Fledge starts, Updated at the proper value when Fledge stops * - db_id_Last -- Latest database available * - n_readings_per_db -- Number of readings table per database * - n_db_preallocate -- Number of databases to allocate in advance * * The readings tables are allocated in sequence starting from the readings_1_1 and proceeding with the other tables available in the first database. * The tables in the 2nd database (readings_2.db) will be used when all the tables in the first db are allocated. 
*
 * Implementation notes:
 *
 * 1) Many functions receive the database connection as an input parameter:
 *
 *    - sqlite3 *dbHandle
 *
 *    and they will use that connection for the sql operations instead of allocating a new one each time.
 *    This approach allows to:
 *
 *    - allocate a connection once using it for all the following operations
 *    - avoid to receive in use a connection having a different configuration (attached databases)
 *      as the connections are handled in pool and it is not defined which one will be allocated,
 *      moreover all the operations are executed in parallel in multi threads
 *
 */
class ReadingsCatalogue {
	public:
		// Identifies a readings table: database id + table id within it
		typedef struct ReadingReference {
			int dbId;
			int tableId;
		} tyReadingReference;

		// Lazily constructed singleton; never deleted
		static ReadingsCatalogue *getInstance()
		{
			static ReadingsCatalogue *instance = 0;
			if (!instance)
			{
				instance = new ReadingsCatalogue;
			}
			return instance;
		}

		void		multipleReadingsInit(STORAGE_CONFIGURATION &storageConfig);
		std::string	generateDbAlias(int dbId);
		std::string	generateDbName(int tableId);
		std::string	generateDbFileName(int dbId);
		std::string	generateDbNameFromTableId(int tableId);
		std::string	generateReadingsName(int dbId, int tableId);
		void		getAllDbs(std::vector<int> &dbIdList);
		void		getNewDbs(std::vector<int> &dbIdList);
		int		getMaxReadingsId(int dbId);
		int		getReadingsCount();
		int		getReadingPosition(int dbId, int tableId);
		int		getNReadingsAvailable() const {return m_nReadingsAvailable;}
		// returns the value before the add operation
		long		getIncGlobalId() { return m_ReadingsGlobalId.fetch_add(1); }
		long		getMinGlobalId (sqlite3 *dbHandle);
		long		getGlobalId() {return m_ReadingsGlobalId;};
		bool		evaluateGlobalId();
		bool		storeGlobalId ();
		void		preallocateReadingsTables(int dbId);
		bool		loadAssetReadingCatalogue();
		bool		loadEmptyAssetReadingCatalogue(bool clean = true);
		bool		latestDbUpdate(sqlite3 *dbHandle, int newDbId);
		int		preallocateNewDbsRange(int dbIdStart, int dbIdEnd);
		tyReadingReference	getEmptyReadingTableReference(std::string& asset);
		tyReadingReference	getReadingReference(Connection *connection, const char *asset_code);
		bool		attachDbsToAllConnections();
		std::string	sqlConstructMultiDb(std::string &sqlCmdBase, std::vector<std::string>  &assetCodes, bool considerExclusion=false);
		std::string	sqlConstructOverflow(std::string &sqlCmdBase, std::vector<std::string>  &assetCodes, bool considerExclusion=false, bool groupBy = false);
		int		purgeAllReadings(sqlite3 *dbHandle, const char *sqlCmdBase, char **errMsg = NULL, unsigned long *rowsAffected = NULL);
		bool		connectionAttachAllDbs(sqlite3 *dbHandle);
		bool		connectionAttachDbList(sqlite3 *dbHandle, std::vector<int> &dbIdList);
		bool		attachDb(sqlite3 *dbHandle, std::string &path, std::string &alias, int dbId);
		void		detachDb(sqlite3 *dbHandle, std::string &alias);
		void		setUsedDbId(int dbId);
		int		extractReadingsIdFromName(std::string tableName);
		int		extractDbIdFromName(std::string tableName);
		int		SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **errMsg = NULL);
		bool		createReadingsOverflowTable(sqlite3 *dbHandle, int dbId);
		int		getMaxAttached() { return m_attachLimit; };

	private:
		STORAGE_CONFIGURATION	m_storageConfigCurrent;	// The current configuration of the multiple readings
		STORAGE_CONFIGURATION	m_storageConfigApi;	// The parameters retrieved from the API

		enum NEW_DB_OPERATION {
			NEW_DB_ATTACH_ALL,
			NEW_DB_ATTACH_REQUEST,
			NEW_DB_DETACH
		};

		enum ACTION {
			ACTION_DB_ADD,
			ACTION_DB_REMOVE,
			ACTION_DB_NONE,
			ACTION_TB_ADD,
			ACTION_TB_REMOVE,
			ACTION_TB_NONE,
			ACTION_INVALID
		};

		// Result of scanning a database for the last readings table in use
		typedef struct ReadingAvailable {
			int lastReadings;
			int tableCount;
		} tyReadingsAvailable;

		ReadingsCatalogue();
		bool		createNewDB(sqlite3 *dbHandle, int newDbId, int startId, NEW_DB_OPERATION attachAllDb);
		int		getUsedTablesDbId(int dbId);
		int		getNReadingsAllocate() const {return m_storageConfigCurrent.nReadingsPerDb;}
		bool		createReadingsTables(sqlite3 *dbHandle, int dbId, int idStartFrom, int nTables);
		bool		isReadingAvailable() const;
		void		allocateReadingAvailable();
		tyReadingsAvailable	evaluateLastReadingAvailable(sqlite3 *dbHandle, int dbId);
		long		calculateGlobalId (sqlite3 *dbHandle);
		std::string	generateDbFilePath(int dbId);
		void		raiseError(const char *operation, const char *reason,...);
		int		SQLStep(sqlite3_stmt *statement);
		bool		enableWAL(std::string &dbPathReadings);
		bool		configurationRetrieve(sqlite3 *dbHandle);
		void		prepareAllDbs();
		bool		applyStorageConfigChanges(sqlite3 *dbHandle);
		void		dbFileDelete(std::string dbPath);
		void		dbsRemove(int startId, int endId);
		void		storeReadingsConfiguration (sqlite3 *dbHandle);
		ACTION		changesLogicDBs(int dbIdCurrent , int dbIdLast, int nDbPreallocateCurrent, int nDbPreallocateRequest, int nDbLeftFreeBeforeAllocate);
		ACTION		changesLogicTables(int maxUsed ,int Current, int Request);
		int		retrieveDbIdFromTableId(int tableId);
		void		configChangeAddDb(sqlite3 *dbHandle);
		void		configChangeRemoveDb(sqlite3 *dbHandle);
		void		configChangeAddTables(sqlite3 *dbHandle , int startId, int endId);
		void		configChangeRemoveTables(sqlite3 *dbHandle , int startId, int endId);
		int		calcMaxReadingUsed();
		void		dropReadingsTables(sqlite3 *dbHandle, int dbId, int idStart, int idEnd);

		int		m_dbIdCurrent;		// Current database in use
		int		m_dbIdLast;		// Last database available not already in use
		int		m_dbNAvailable;		// Number of databases available
		std::vector<int>	m_dbIdList;	// Databases already created but not in use
		std::atomic<long>	m_ReadingsGlobalId;	// Global row id shared among all the readings table
		int		m_nReadingsAvailable = 0;	// Number of readings tables available
		std::map <std::string, TableReference> m_AssetReadingCatalogue={
			// In memory structure to identify in which database/table an asset is stored
			// asset_code - reading Table Id, Db Id
			// {"",    ,{1  ,1  }}
		};
		std::map <std::string, std::pair<int, int>> m_EmptyAssetReadingCatalogue={
			// In memory structure to identify in which database/table an asset is empty
			// asset_code - reading Table Id, Db Id
			// {"",    ,{1  ,1  }}
		};
		int		m_nextOverflow;	// The next database to use for overflow assets
		int		m_attachLimit;	// Returned by getMaxAttached()
		int		m_maxOverflowUsed;
		int		m_compounds;	// Max number of compound statements
		// NOTE(review): presumably guards m_EmptyAssetReadingCatalogue — confirm in the .cpp
		std::mutex	m_emptyReadingTableMutex;
	public:
		TransactionBoundary	m_tx;
};

/**
 * Used to synchronize the attach database operation
 */
class AttachDbSync {
	public:
		static AttachDbSync *getInstance()
		{
			static AttachDbSync instance;
			return &instance;
		}
		void lock()	{m_dbLock.lock();}
		void unlock()	{m_dbLock.unlock();}
	private:
		AttachDbSync(){};
		std::mutex	m_dbLock;
};
#endif

================================================
FILE: C/plugins/storage/sqlite/common/include/sqlite_common.h
================================================
#ifndef _COMMON_CONNECTION_H
#define _COMMON_CONNECTION_H
#include <sql_buffer.h>
#include <iostream>
#include <sqlite3.h>
#include "rapidjson/document.h"
#include "rapidjson/writer.h"
#include "rapidjson/stringbuffer.h"
#include "rapidjson/error/error.h"
#include "rapidjson/error/en.h"
#include <string>
#include <map>
#include <stdarg.h>
#include <stdlib.h>
#include <sstream>
#include <logger.h>
#include <time.h>
#include <unistd.h>
#include <chrono>
#include <thread>
#include <atomic>
#include <condition_variable>
#include <sys/time.h>

#define _DB_NAME	"/fledge.db"
#define READINGS_DB_NAME_BASE	"readings"

#define DB_CONFIGURATION "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -4000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 4096000;"

#define LEN_BUFFER_DATE 100
#define F_TIMEH24_S	"%H:%M:%S"
#define F_DATEH24_S	"%Y-%m-%d %H:%M:%S"
#define F_DATEH24_M	"%Y-%m-%d %H:%M"
#define F_DATEH24_H	"%Y-%m-%d %H"
// This is the default datetime format in Fledge: 2018-05-03 18:15:00.622
#define F_DATEH24_MS	"%Y-%m-%d %H:%M:%f"
// Format up to seconds
#define F_DATEH24_SEC	"%Y-%m-%d %H:%M:%S"
#define SQLITE3_NOW	"strftime('%Y-%m-%d %H:%M:%f', 'now', 'localtime')"
// The default precision is milliseconds, it adds microseconds and timezone
#define SQLITE3_NOW_READING	"strftime('%Y-%m-%d
%H:%M:%f000+00:00', 'now')" #define SQLITE3_FLEDGE_DATETIME_TYPE "DATETIME" #define STORAGE_PURGE_RETAIN_ANY 0x0001U #define STORAGE_PURGE_RETAIN_ALL 0x0002U #define STORAGE_PURGE_SIZE 0x0004U static std::map<std::string, std::string> sqliteDateFormat = { {"HH24:MI:SS", F_TIMEH24_S}, {"YYYY-MM-DD HH24:MI:SS.MS", F_DATEH24_MS}, {"YYYY-MM-DD HH24:MI:SS", F_DATEH24_S}, {"YYYY-MM-DD HH24:MI", F_DATEH24_M}, {"YYYY-MM-DD HH24", F_DATEH24_H}, {"", ""} }; #endif ================================================ FILE: C/plugins/storage/sqlite/common/purge_configuration.cpp ================================================ /* * Fledge Fledge Configuration management. * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <purge_configuration.h> #include <logger.h> using namespace std; PurgeConfiguration *PurgeConfiguration::m_instance = 0; /** * Constructor for the purge configurtion class */ PurgeConfiguration::PurgeConfiguration() : m_minimum(0) { } /** * Destructor for the purge configurtion class */ PurgeConfiguration::~PurgeConfiguration() { } /** * Return the singleton instance of the PurgeConfiguration class * for this plugin * * @return PurgeConfiguration* singleton instance */ PurgeConfiguration *PurgeConfiguration::getInstance() { if (m_instance == 0) { m_instance = new PurgeConfiguration(); } return m_instance; } /** * Add an asset to the exclusion list * * @param asset the asset to add to the exclusion list */ void PurgeConfiguration::exclude(const string& asset) { Logger::getLogger()->debug("'%s' added to exclusion list", asset.c_str()); m_exclude.push_back(asset); } /** * Check if the named asset appears in the exclusion list * * @param asset Asset to check for exclusion * @return True if the asset is excluded */ bool PurgeConfiguration::isExcluded(const string& asset) { for (auto it = m_exclude.cbegin(); it != m_exclude.cend(); it++) { if (it->compare(asset) == 0) { return true; } } return false; } 
/** * Set the minimum number of rows to retian for each asset * * @param minimum Minimum number of rows to retain */ void PurgeConfiguration::minimumRetained(uint32_t minimum) { m_minimum = minimum; } ================================================ FILE: C/plugins/storage/sqlite/common/readings.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2018 OSIsoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <math.h> #include <sqlite_common.h> #include <connection.h> #include <connection_manager.h> #include <reading_stream.h> #include <random> #include <utils.h> #include <sys/stat.h> #include <string_utils.h> #include <algorithm> #include <vector> #include <readings_catalogue.h> // 1 enable performance tracking #define INSTRUMENT 0 #define LOG_AFTER_NERRORS 0 #if INSTRUMENT #include <sys/time.h> #endif // Decode stream data #define RDS_USER_TIMESTAMP(stream, x) stream[x]->userTs #define RDS_ASSET_CODE(stream, x) stream[x]->assetCode #define RDS_PAYLOAD(stream, x) &(stream[x]->assetCode[0]) + stream[x]->assetCodeLength //#ifndef PLUGIN_LOG_NAME //#define PLUGIN_LOG_NAME "SQLite 3" //#endif /** * SQLite3 storage plugin for Fledge */ using namespace std; using namespace rapidjson; #define CONNECT_ERROR_THRESHOLD 5*60 // 5 minutes /* * The following allows for conditional inclusion of code that tracks the top queries * run by the storage plugin and the number of times a particular statement has to * be retried because of the database being busy./ */ #define DO_PROFILE 0 #define DO_PROFILE_RETRIES 0 #if DO_PROFILE #include <profile.h> #define TOP_N_STATEMENTS 10 // Number of statements to report in top n #define RETRY_REPORT_THRESHOLD 1000 // Report retry statistics every X calls QueryProfile profiler(TOP_N_STATEMENTS); unsigned long retryStats[MAX_RETRIES] = { 0,0,0,0,0,0,0,0,0,0 }; unsigned long numStatements = 0; int maxQueue = 0; #endif static std::atomic<int> m_waiting(0); 
static std::atomic<int> m_writeAccessOngoing(0);	// Count of in-flight write operations
static std::mutex	db_mutex;
static std::condition_variable	db_cv;
static int purgeBlockSize = PURGE_DELETE_BLOCK_SIZE;

static time_t connectErrorTime = 0;

// Used to synchronize the shut down of the threads executing appendReadings
static std::atomic<int> m_appendCount(0);
static bool		m_shutdown=false;

#ifndef SQLITE_SPLIT_READINGS
/**
 * Check whether to compute timebucket query with min,max,avg for all datapoints
 *
 * @param payload	JSON payload
 * @return		True if aggregation is 'all'
 */
bool aggregateAll(const Value& payload)
{
	if (payload.HasMember("aggregate") &&
	    payload["aggregate"].IsObject())
	{
		const Value& agg = payload["aggregate"];
		if (agg.HasMember("operation") &&
		    (strcmp(agg["operation"].GetString(), "all") == 0))
		{
			return true;
		}
	}
	return false;
}
#endif

#ifndef SQLITE_SPLIT_READINGS
/**
 * Build, execute and return data of a timebucket query with min,max,avg for all datapoints
 *
 * @param payload	JSON object for timebucket query
 * @param resultSet	JSON Output buffer
 * @return		True on success, false on any error
 */
bool Connection::aggregateQuery(const Value& payload, string& resultSet)
{
	vector<string>	asset_codes;

	if (!payload.HasMember("where") ||
	    !payload.HasMember("timebucket"))
	{
		raiseError("retrieve", "aggregateQuery is missing "
			   "'where' and/or 'timebucket' properties");
		return false;
	}

	SQLBuffer sql;

	sql.append("SELECT asset_code, ");

	double size = 1;
	string timeColumn;

	// Check timebucket object
	if (payload.HasMember("timebucket"))
	{
		const Value& bucket = payload["timebucket"];
		if (!bucket.HasMember("timestamp"))
		{
			raiseError("retrieve", "aggregateQuery is missing "
				   "'timestamp' property for 'timebucket'");
			return false;
		}

		// Time column
		timeColumn = bucket["timestamp"].GetString();

		// Bucket size; non-numeric or zero size falls back to 1 second
		if (bucket.HasMember("size"))
		{
			size = atof(bucket["size"].GetString());
			if (!size)
			{
				size = 1;
			}
		}

		// Time format for output
		string newFormat;
		if (bucket.HasMember("format") && size >= 1)
		{
			applyColumnDateFormatLocaltime(bucket["format"].GetString(),
							"timestamp",
							newFormat,
							true);
			sql.append(newFormat);
		}
		else
		{
			if (size < 1)
			{
				// sub-second granularity to time bucket size:
				// force output formatting with microseconds
				newFormat = "strftime('%Y-%m-%d %H:%M:%S', " + timeColumn +
					    ", 'localtime') || substr(" + timeColumn +
					    ", instr(" + timeColumn + ", '.'), 7)";
				sql.append(newFormat);
			}
			else
			{
				sql.append("timestamp");
			}
		}

		// Time output alias
		if (bucket.HasMember("alias"))
		{
			sql.append(" AS ");
			sql.append(bucket["alias"].GetString());
		}
	}

	// JSON format aggregated data
	sql.append(", '{' || group_concat('\"' || x || '\" : ' || resd, ', ') || '}' AS reading ");

	// subquery
	sql.append("FROM ( SELECT x, asset_code, max(timestamp) AS timestamp, ");
	// Add min
	sql.append("'{\"min\" : ' || min(theval) || ', ");
	// Add max
	sql.append("\"max\" : ' || max(theval) || ', ");
	// Add avg
	sql.append("\"average\" : ' || avg(theval) || ', ");
	// Add count
	sql.append("\"count\" : ' || count(theval) || ', ");
	// Add sum
	sql.append("\"sum\" : ' || sum(theval) || '}' AS resd ");

	if (size < 1)
	{
		// Add max(user_ts)
		sql.append(", max(" + timeColumn + ") AS " + timeColumn + " ");
	}

	// subquery
	sql.append("FROM ( SELECT asset_code, ");
	sql.append(timeColumn);
	if (size >= 1)
	{
		sql.append(", datetime(");
	}
	else
	{
		sql.append(", (");
	}

	// Size formatted string: integral sizes rendered without a decimal point
	string size_format;
	if (fmod(size, 1.0) == 0.0)
	{
		size_format = to_string(int(size));
	}
	else
	{
		size_format = to_string(size);
	}

	// Add timebucket size
	// Unix Time is (Julian Day - JulianDay(1/1/1970 0:00 UTC) * Seconds_per_day
	if (size != 1)
	{
		sql.append(size_format);
		sql.append(" * round((julianday(");
		sql.append(timeColumn);
		sql.append(") - " + string(JULIAN_DAY_START_UNIXTIME) + ") * " + string(SECONDS_PER_DAY) + " / ");
		sql.append(size_format);
		sql.append(")");
	}
	else
	{
		sql.append("round((julianday(");
		sql.append(timeColumn);
		sql.append(") - " + string(JULIAN_DAY_START_UNIXTIME) + ") * " + string(SECONDS_PER_DAY) + " / 1)");
	}

	if (size >= 1)
	{
		sql.append(", 'unixepoch') AS \"timestamp\", reading, ");
	}
	else
	{
		sql.append(") AS \"timestamp\", reading, ");
	}

	// Get all datapoints in 'reading' field
	sql.append("json_each.key AS x, json_each.value AS theval FROM ");

	{
		string sql_cmd;
		ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();

		// SQL - start
		sql_cmd = R"( ( )";

		// SQL - union of all the readings tables
		string sql_cmd_base;
		string sql_cmd_tmp;
		// _assetcode_, _dbname_ and _tablename_ are placeholders replaced per table
		sql_cmd_base = " SELECT ROWID, id, \"_assetcode_\" asset_code, reading, user_ts, ts FROM _dbname_._tablename_ ";
		sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, asset_codes);
		sql_cmd += sql_cmd_tmp;

		// SQL - end
		sql_cmd += R"( ) as reading_table )";
		sql.append(sql_cmd.c_str());
		sql.append(", json_each(reading_table.reading) ");
	}

	// Add where condition
	sql.append("WHERE ");
	if (!jsonWhereClause(payload["where"], sql, asset_codes))
	{
		raiseError("retrieve", "aggregateQuery: failure while building WHERE clause");
		return false;
	}

	// close subquery
	sql.append(") tmp ");

	// Add group by
	// Unix Time is (Julian Day - JulianDay(1/1/1970 0:00 UTC) * Seconds_per_day
	sql.append(" GROUP BY x, asset_code, ");
	sql.append("round((julianday(");
	sql.append(timeColumn);
	sql.append(") - " + string(JULIAN_DAY_START_UNIXTIME) + ") * " + string(SECONDS_PER_DAY) + " / ");
	if (size != 1)
	{
		sql.append(size_format);
	}
	else
	{
		sql.append('1');
	}
	sql.append(") ");

	// close subquery
	sql.append(") tbl ");

	// Add final group and sort
	sql.append("GROUP BY timestamp, asset_code ORDER BY timestamp DESC");

	// Add limit
	if (payload.HasMember("limit"))
	{
		if (!payload["limit"].IsInt())
		{
			// NOTE(review): "specfied" typo is in a user-facing message; left unchanged here
			raiseError("retrieve", "aggregateQuery: limit must be specfied as an integer");
			return false;
		}
		sql.append(" LIMIT ");
		try
		{
			sql.append(payload["limit"].GetInt());
		}
		// NOTE(review): catches by value; consider catch (const exception& e)
		catch (exception e)
		{
			raiseError("retrieve", "aggregateQuery: bad value for limit parameter: %s", e.what());
			return false;
		}
	}
	sql.append(';');

	// Execute query
	const char *query = sql.coalesce();
	int rc;
	sqlite3_stmt *stmt;

	logSQL("CommonRetrieve", query);

	// Prepare the SQL statement and get the result set
	rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL);

	// Release memory for 'query' var
	delete[] query;

	if (rc != SQLITE_OK)
	{
		raiseError("retrieve", sqlite3_errmsg(dbHandle));
		return false;
	}

	// Call result set mapping
	rc = mapResultSet(stmt, resultSet);

	// Delete result set
	sqlite3_finalize(stmt);

	// Check result set mapping errors
	if (rc != SQLITE_DONE)
	{
		raiseError("retrieve", sqlite3_errmsg(dbHandle));
		// Failure
		return false;
	}

	return true;
}
#endif

/**
 * Append a stream of readings to SQLite db
 *
 * @param readings	readings to store into the SQLite db
 * @param commit	if true a database commit is executed and a new transaction will be opened at the next execution
 *
 * TODO: the current code should be adapted to use the multi databases/tables implementation
 *
 */
int Connection::readingStream(ReadingStream **readings, bool commit)
{
	// Row definition related
	int i;
	bool add_row = false;
	const char *user_ts;
	string now;
	char ts[60], micro_s[10];
	char formatted_date[LEN_BUFFER_DATE] = {0};
	struct tm timeinfo;
	const char *asset_code;
	const char *payload;
	string reading;

	// Retry mechanism
	int retries = 0;
	int sleep_time_ms = 0;

	// SQLite related
	sqlite3_stmt *stmt;
	int sqlite3_resut;
	int rowNumber = -1;

	if (m_noReadings)
	{
		Logger::getLogger()->error("Attempt to stream readings to plugin that has no storage for readings");
		return 0;
	}

	ostringstream threadId;
	threadId << std::this_thread::get_id();
	ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance();

	{
		// Attaches the needed databases if the queue is not empty
		AttachDbSync *attachSync = AttachDbSync::getInstance();
		attachSync->lock();

		if ( ! m_NewDbIdList.empty())
		{
			readCatalogue->connectionAttachDbList(this->getDbHandle(), m_NewDbIdList);
		}
		attachSync->unlock();
	}

#if INSTRUMENT
	struct timeval	start, t1, t2, t3, t4, t5;
#endif

	// * TODO: the current code should be adapted to use the multi databases/tables implementation
	const char *sql_cmd = "INSERT INTO  " READINGS_DB ".readings_1 ( asset_code, reading, user_ts ) VALUES  (?,?,?)";

	if (sqlite3_prepare_v2(dbHandle, sql_cmd, strlen(sql_cmd), &stmt, NULL) != SQLITE_OK)
	{
		raiseError("readingStream", sqlite3_errmsg(dbHandle));
		return -1;
	}

	// The handling of the commit parameter is overridden as using a pool of connections every execution receives
	// a different one, so a commit at every run is executed.
	m_streamOpenTransaction = true;
	commit = true;

	if (m_streamOpenTransaction)
	{
		if (sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL) != SQLITE_OK)
		{
			raiseError("readingStream", sqlite3_errmsg(dbHandle));
			return -1;
		}
		m_streamOpenTransaction = false;
	}

#if INSTRUMENT
	gettimeofday(&start, NULL);
#endif

	try
	{
		for (i = 0; readings[i]; i++)
		{
			add_row = true;

			// Handles - asset_code
			asset_code = RDS_ASSET_CODE(readings, i);

			// Handles - reading
			payload = RDS_PAYLOAD(readings, i);
			reading = escape(payload);

			// Handles - user_ts
			memset(&timeinfo, 0, sizeof(struct tm));
			gmtime_r(&RDS_USER_TIMESTAMP(readings, i).tv_sec, &timeinfo);
			std::strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", &timeinfo);
			snprintf(micro_s, sizeof(micro_s), ".%06lu", RDS_USER_TIMESTAMP(readings, i).tv_usec);

			formatted_date[0] = {0};
			strncat(ts, micro_s, 10);
			user_ts = ts;
			if (strcmp(user_ts, "now()") == 0)
			{
				getNow(now);
				user_ts = now.c_str();
			}
			else
			{
				if (!formatDate(formatted_date, sizeof(formatted_date), user_ts))
				{
					raiseError("appendReadings", "Invalid date '%s'", user_ts);
					add_row = false;
				}
				else
				{
					user_ts = formatted_date;
				}
			}

			if (add_row)
			{
				if (stmt != NULL)
				{
					sqlite3_bind_text(stmt, 1, asset_code,      -1, SQLITE_STATIC);
					sqlite3_bind_text(stmt, 2, reading.c_str(), -1, SQLITE_STATIC);
					sqlite3_bind_text(stmt, 3, user_ts,         -1, SQLITE_STATIC);

					retries =0;
					sleep_time_ms = 0;

					// Retry mechanism in case SQLlite DB is locked
					do {
						// Insert the row using a lock to ensure one insert at time
						{
							m_writeAccessOngoing.fetch_add(1);
							//unique_lock<mutex> lck(db_mutex);

							sqlite3_resut = sqlite3_step(stmt);

							m_writeAccessOngoing.fetch_sub(1);
							//db_cv.notify_all();
						}

						if (sqlite3_resut == SQLITE_LOCKED )
						{
							sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF);
							retries++;

							Logger::getLogger()->info("SQLITE_LOCKED - record %d - retry number %d sleep time ms %d",i, retries, sleep_time_ms);

							std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
						}
						if (sqlite3_resut == SQLITE_BUSY)
						{
							ostringstream threadId;
							threadId << std::this_thread::get_id();

							sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF);
							retries++;

							Logger::getLogger()->info("SQLITE_BUSY - thread '%s' - record %d - retry number %d sleep time ms %d", threadId.str().c_str() ,i , retries, sleep_time_ms);

							std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
						}
					} while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY));

					if (sqlite3_resut == SQLITE_DONE)
					{
						rowNumber++;

						sqlite3_clear_bindings(stmt);
						sqlite3_reset(stmt);
					}
					else
					{
						raiseError("appendReadings",
								   "Inserting a row into SQLIte using a prepared command - asset_code '%s' error '%s' reading '%s' ",
								   asset_code,
								   sqlite3_errmsg(dbHandle),
								   reading.c_str());

						sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
						m_streamOpenTransaction = true;
						return -1;
					}
				}
			}
		}
		rowNumber = i;
	}
	// NOTE(review): catches by value; consider catch (const exception& e)
	catch (exception e)
	{
		raiseError("appendReadings", "Inserting a row into SQLIte using a prepared command - error '%s'", e.what());

		sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
		m_streamOpenTransaction = true;
		return -1;
	}

#if INSTRUMENT
	gettimeofday(&t1, NULL);
#endif

	if (commit)
	{
		sqlite3_resut = sqlite3_exec(dbHandle, "END TRANSACTION", NULL, NULL, NULL);
		if (sqlite3_resut != SQLITE_OK)
		{
			raiseError("appendReadings", "Executing the commit of the transaction - error '%s'", sqlite3_errmsg(dbHandle));
			rowNumber = -1;
		}
		m_streamOpenTransaction = true;
	}

	if(stmt != NULL)
	{
		if (sqlite3_finalize(stmt) != SQLITE_OK)
		{
			raiseError("appendReadings","freeing SQLite in memory structure - error '%s'", sqlite3_errmsg(dbHandle));
		}
	}

#if INSTRUMENT
	gettimeofday(&t2, NULL);
#endif

#if INSTRUMENT
	struct timeval tm;
	double timeT1, timeT2, timeT3;

	timersub(&t1, &start, &tm);
	timeT1 = tm.tv_sec + ((double)tm.tv_usec / 1000000);

	timersub(&t2, &t1, &tm);
	timeT2 = tm.tv_sec + ((double)tm.tv_usec / 1000000);

	Logger::getLogger()->debug("readingStream row count %d", rowNumber);

	Logger::getLogger()->debug("readingStream Timing - stream handling %.3f seconds - commit/finalize %.3f seconds",
							   timeT1,
							   timeT2
	);
#endif

	return rowNumber;
}

/**
 * Queue a database id so that the database is attached to this
 * connection before the next readings operation
 */
void Connection::setUsedDbId(int dbId)
{
	m_NewDbIdList.push_back(dbId);
}

/**
 * Wait until all the threads executing the appendReadings are shut down
 */
void Connection::shutdownAppendReadings()
{
	ostringstream threadId;
	threadId << std::this_thread::get_id();

	Logger::getLogger()->debug("%s - thread Id '%s' appendReadings shutting down started", __FUNCTION__, threadId.str().c_str());

	m_shutdown=true;

	while (m_appendCount > 0)
	{
		Logger::getLogger()->debug("%s - thread Id '%s' waiting threads to shut down, count %d ", __FUNCTION__, threadId.str().c_str(), int(m_appendCount));
		std::this_thread::sleep_for(std::chrono::milliseconds(150));
	}

	Logger::getLogger()->debug("%s - thread Id '%s' appendReadings shutting down ended", __FUNCTION__, threadId.str().c_str());
}

#ifndef SQLITE_SPLIT_READINGS
/**
 * Append a set of readings to the readings table
 */
int Connection::appendReadings(const char *readings)
{
// Default template parameter uses UTF8 and MemoryPoolAllocator.
Document doc; int row = 0, readingId; bool add_row = false; int lastReadingsId; // Variables related to the SQLite insert using prepared command const char *user_ts; const char *asset_code; string reading, msg; sqlite3_stmt *stmt; int rc; int sqlite3_resut; int readingsId; string now; std::pair<int, sqlite3_stmt *> pairValue; string lastAsset; // Retry mechanism int retries = 0; int sleep_time_ms = 0; int stmtArraySize; std::thread::id tid = std::this_thread::get_id(); ostringstream threadId; if (m_noReadings) { Logger::getLogger()->error("Attempt to append readings to plugin that has no storage for readings"); return 0; } threadId << tid; { if (m_shutdown) { Logger::getLogger()->debug("%s - thread Id '%s' plugin is shutting down, operation cancelled", __FUNCTION__, threadId.str().c_str()); return -1; } m_appendCount++; Logger::getLogger()->debug("%s - thread Id '%s' operation started , threads count %d ", __FUNCTION__, threadId.str().c_str(), int(m_appendCount) ); } ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance(); // Attaches the needed databases if the queue is not empty AttachDbSync *attachSync = AttachDbSync::getInstance(); attachSync->lock(); if ( ! 
m_NewDbIdList.empty()) { readCatalogue->connectionAttachDbList(this->getDbHandle(), m_NewDbIdList); } attachSync->unlock(); stmtArraySize = readCatalogue->getReadingPosition(0, 0); vector<sqlite3_stmt *> readingsStmt(stmtArraySize + 1, nullptr); #if INSTRUMENT Logger::getLogger()->debug("appendReadings start thread '%s'", threadId.str().c_str()); struct timeval start, t1, t2, t3, t4, t5; #endif #if INSTRUMENT gettimeofday(&start, NULL); #endif ParseResult ok = doc.Parse(readings); if (!ok) { raiseError("appendReadings", GetParseError_En(doc.GetParseError())); m_appendCount--; return -1; } if (!doc.HasMember("readings")) { raiseError("appendReadings", "Payload is missing a readings array"); m_appendCount--; return -1; } Value &readingsValue = doc["readings"]; if (!readingsValue.IsArray()) { raiseError("appendReadings", "Payload is missing the readings array"); m_appendCount--; return -1; } int tableIdx; string sql_cmd; { m_writeAccessOngoing.fetch_add(1); //unique_lock<mutex> lck(db_mutex); sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL); #if INSTRUMENT gettimeofday(&t1, NULL); #endif lastAsset = ""; for (Value::ConstValueIterator itr = readingsValue.Begin(); itr != readingsValue.End(); ++itr) { if (!itr->IsObject()) { raiseError("appendReadings","Each reading in the readings array must be an object"); sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION;", NULL, NULL, NULL); m_appendCount--; return -1; } add_row = true; // Handles - user_ts char formatted_date[LEN_BUFFER_DATE] = {0}; user_ts = (*itr)["user_ts"].GetString(); if (strcmp(user_ts, "now()") == 0) { getNow(now); user_ts = now.c_str(); } else { if (! 
formatDate(formatted_date, sizeof(formatted_date), user_ts) ) { raiseError("appendReadings", "Invalid date |%s|", user_ts); add_row = false; } else { user_ts = formatted_date; } } if (add_row) { // Handles - asset_code asset_code = (*itr)["asset_code"].GetString(); if (strlen(asset_code) == 0) { Logger::getLogger()->warn("Sqlite appendReadings - empty asset code value, row ignored."); stmt = NULL; } //# A different asset is managed respect the previous one if (strlen(asset_code) && lastAsset.compare(asset_code) != 0) { ReadingsCatalogue::tyReadingReference ref; ref = readCatalogue->getReadingReference(this, asset_code); readingsId = ref.tableId; Logger::getLogger()->debug("tyReadingReference '%s' %d %d ", asset_code, ref.dbId, ref.tableId); if (readingsId == -1) { Logger::getLogger()->warn("appendReadings - It was not possible to insert the row for the asset_code '%s' into the readings, row ignored.", asset_code); stmt = NULL; } else { int nReadings, idxReadings; nReadings = readCatalogue->getReadingsCount(); idxReadings = readCatalogue->getReadingPosition(ref.dbId, ref.tableId); Logger::getLogger()->debug("tyReadingReference '%s' %d %d idxReadings %d", asset_code, ref.dbId, ref.tableId, idxReadings); if (idxReadings >= stmtArraySize) { stmtArraySize = idxReadings + 1; readingsStmt.resize(stmtArraySize, nullptr); Logger::getLogger()->debug("appendReadings: thread '%s' resize size %d idx %d ", threadId.str().c_str(), stmtArraySize, readingsId); } if (readingsStmt[idxReadings] == nullptr) { string dbName = readCatalogue->generateDbName(ref.dbId); string dbReadingsName = readCatalogue->generateReadingsName(ref.dbId, readingsId); if (readingsId == 0) { // Overflow table sql_cmd = "INSERT INTO " + dbName + ".readings_" + to_string(ref.dbId) + "_overflow ( id, asset_code, user_ts, reading ) VALUES (?,'" + asset_code + "',?,?)"; } else { sql_cmd = "INSERT INTO " + dbName + "." 
+ dbReadingsName + " ( id, user_ts, reading ) VALUES (?,?,?)"; } rc = SQLPrepare(dbHandle, sql_cmd.c_str(), &readingsStmt[idxReadings]); Logger::getLogger()->debug("tyReadingReference sql_cmd '%s' '%s' %d %d ", sql_cmd.c_str(), asset_code, ref.dbId, ref.tableId); if (rc != SQLITE_OK) { raiseError("appendReadings", sqlite3_errmsg(dbHandle)); } } stmt = readingsStmt[idxReadings]; lastAsset = asset_code; } } // Handles - reading StringBuffer buffer; Writer<StringBuffer> writer(buffer); (*itr)["reading"].Accept(writer); reading = escape(buffer.GetString()); if(stmt != NULL) { // First reading, use the id as transaction start if (itr == readingsValue.Begin()) { // Get current reading global id unsigned long startTransactionId = readCatalogue->getIncGlobalId(); // Mark transaction srtart fot this thread readCatalogue->m_tx.SetThreadTransactionStart(tid, startTransactionId); // Bind first parameter with reading id sqlite3_bind_int64 (stmt, 1, startTransactionId); } else { // Bind first parameter with reading id sqlite3_bind_int64 (stmt, 1, readCatalogue->getIncGlobalId()); } // Set parameter for user timestamp sqlite3_bind_text(stmt, 2, user_ts ,-1, SQLITE_STATIC); // Set parameter for reading JSON data sqlite3_bind_text(stmt, 3, reading.c_str(), -1, SQLITE_STATIC); retries =0; sleep_time_ms = 0; string msgError; // Retry mechanism in case SQLlite DB is locked do { // Insert the row using a lock to ensure one insert at time { sqlite3_resut = sqlite3_step(stmt); } if(sqlite3_resut != SQLITE_DONE) { msgError = ""; if (sqlite3_resut == SQLITE_LOCKED ) { msgError = "SQLITE_LOCKED"; } else if (sqlite3_resut == SQLITE_BUSY) { msgError = "SQLITE_BUSY"; } else if (sqlite3_resut != SQLITE_DONE) { msgError = "SQLITE_ERROR"; } sleep_time_ms = PREP_CMD_RETRY_BASE + (random() % PREP_CMD_RETRY_BACKOFF); retries++; if (retries >= LOG_AFTER_NERRORS) { Logger::getLogger()->warn("appendReadings - %s - " \ "asset_code '%s' readingsId %d " \ "thread '%s' dbHandle %X record " \ "%d retry 
number %d sleep time ms %derror '%s'", msgError.c_str(), asset_code, readingsId, threadId.str().c_str() , dbHandle, row, retries, sleep_time_ms, sqlite3_errmsg(dbHandle)); } // Put thread to sleep std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms)); } } while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut != SQLITE_DONE)); if (sqlite3_resut == SQLITE_DONE) { row++; sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); } else { raiseError("appendReadings","Inserting a row into " \ "SQLIte using a prepared command - asset_code " \ "'%s' error '%s' reading '%s' dbHandle %X", asset_code, sqlite3_errmsg(dbHandle), reading.c_str(), dbHandle); sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL); m_appendCount--; // Clear transaction boundary for this thread readCatalogue->m_tx.ClearThreadTransaction(tid); // Finalize sqlite structures for (auto &item : readingsStmt) { if(item != nullptr) { if (sqlite3_finalize(item) != SQLITE_OK) { raiseError("appendReadings","freeing SQLite in memory structure - error '%s'", sqlite3_errmsg(dbHandle)); } } } return -1; } } } } sqlite3_resut = sqlite3_exec(dbHandle, "END TRANSACTION", NULL, NULL, NULL); if (sqlite3_resut != SQLITE_OK) { raiseError("appendReadings", "Executing the commit of the transaction '%s'", sqlite3_errmsg(dbHandle)); row = -1; } // Clear transaction boundary for this thread readCatalogue->m_tx.ClearThreadTransaction(tid); m_writeAccessOngoing.fetch_sub(1); //db_cv.notify_all(); } #if INSTRUMENT gettimeofday(&t2, NULL); #endif // Finalize sqlite structures for (auto &item : readingsStmt) { if(item != nullptr) { if (sqlite3_finalize(item) != SQLITE_OK) { raiseError("appendReadings","freeing SQLite in memory structure - error '%s'", sqlite3_errmsg(dbHandle)); } } } #if INSTRUMENT gettimeofday(&t3, NULL); #endif #if INSTRUMENT struct timeval tm; double timeT1, timeT2, timeT3; timersub(&t1, &start, &tm); timeT1 = tm.tv_sec + 
((double)tm.tv_usec / 1000000);
	timersub(&t2, &t1, &tm);
	timeT2 = tm.tv_sec + ((double)tm.tv_usec / 1000000);
	timersub(&t3, &t2, &tm);
	timeT3 = tm.tv_sec + ((double)tm.tv_usec / 1000000);

	Logger::getLogger()->debug("appendReadings end thread '%s' buffer :%10lu: count :%5d: JSON :%6.3f: inserts :%6.3f: finalize :%6.3f:",
				threadId.str().c_str(), strlen(readings), row, timeT1, timeT2, timeT3 );
#endif

	m_appendCount--;

	return row;
}
#endif

#ifndef SQLITE_SPLIT_READINGS
/**
 * Fetch a block of readings from the reading table
 * It might not work with SQLite 3
 *
 * Fetch, used by the north side, returns timestamp in UTC.
 *
 * NOTE : it expects to handle a date having a fixed format
 * with milliseconds, microseconds and timezone expressed,
 * like for example :
 *
 * 2019-01-11 15:45:01.123456+01:00
 *
 * The block is assembled as a single statement that UNIONs every
 * per-asset readings table plus the overflow tables, ordered by id
 * and limited to blksize rows.
 *
 * @param id		First reading id to fetch (rows with a smaller id are skipped)
 * @param blksize	Maximum number of readings to return
 * @param resultSet	Output: JSON result set of the fetched readings
 * @return		true on success, false on SQL preparation or mapping failure
 */
bool Connection::fetchReadings(unsigned long id, unsigned int blksize, std::string& resultSet)
{
	// NOTE(review): sqlbuffer, zErrMsg and retrieve are declared but never used in this function
	char sqlbuffer[5120];
	char *zErrMsg = NULL;
	int rc;
	int retrieve;
	vector<string> asset_codes;
	string sql_cmd;
	unsigned long minGlobalId;
	unsigned long idWindow;
	unsigned long rowsCount;

	// This connection has no readings storage at all; nothing can be fetched
	if (m_noReadings)
	{
		Logger::getLogger()->error("Attempt to fetch readings to plugin that has no storage for readings");
		return false;
	}

	ostringstream threadId;
	threadId << std::this_thread::get_id();

	ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance();

	{
		// Attaches the needed databases if the queue is not empty
		AttachDbSync *attachSync = AttachDbSync::getInstance();
		attachSync->lock();

		if ( ! m_NewDbIdList.empty())
		{
			readCatalogue->connectionAttachDbList(this->getDbHandle(), m_NewDbIdList);
		}
		attachSync->unlock();
	}

	if (id == 1)
	{
		// at the first extract, Verifies if there are data having id above the current searched window
		minGlobalId = readCatalogue->getMinGlobalId(this->getDbHandle());
		idWindow = id + blksize;

		if (idWindow < minGlobalId)
		{
			// Skip ahead: no rows can exist below minGlobalId
			id = minGlobalId;
			Logger::getLogger()->debug("%s - first extraction, data extracted from the id :%lu:", __FUNCTION__, id);
		}
	}

	// Generate a single SQL statement that using a set of UNION considers all the readings table in handling
	// SQL - start
	sql_cmd = R"( SELECT id, asset_code, reading, strftime('%Y-%m-%d %H:%M:%S', user_ts, 'utc') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, strftime('%Y-%m-%d %H:%M:%f', ts, 'utc') AS ts FROM ( )";

	// SQL - union of all the readings tables
	string sql_cmd_base;
	string sql_cmd_tmp;
	// Would like to add a LIMIT on each sub-query in the union all, however SQLITE
	// does not support this. Note we can not use id + blocksize as this fail if we
	// have holes in the id space
	sql_cmd_base = " SELECT id, \"_assetcode_\" asset_code, reading, user_ts, ts " \
			"FROM _dbname_._tablename_ WHERE id >= " + to_string(id) + " ";

	// Check for any uncommitted transactions:
	// fetch the minimum reading id among all per thread transactions
	// an use it as a boundary limit.
	// If no pending transactions just use current global reading id as limit
	unsigned long safe_id = readCatalogue->m_tx.GetMinReadingId();
	if (safe_id)
	{
		sql_cmd_base += "AND id < " + to_string(safe_id) + " ";
	}
	else
	{
		sql_cmd_base += "AND id < " + to_string(readCatalogue->getGlobalId()) + " ";
	}

	sql_cmd_tmp = readCatalogue->sqlConstructMultiDb(sql_cmd_base, asset_codes);
	sql_cmd += sql_cmd_tmp;

	// Now add in ther overflow tables, bounded by the same uncommitted-transaction limit
	sql_cmd_base = " SELECT id, asset_code, reading, user_ts, ts " \
			"FROM _dbname_._tablename_ WHERE id >= " + to_string(id) + " ";
	if (safe_id)
	{
		sql_cmd_base += "AND id < " + to_string(safe_id) + " ";
	}
	else
	{
		sql_cmd_base += "AND id < " + to_string(readCatalogue->getGlobalId()) + " ";
	}
	sql_cmd_tmp = readCatalogue->sqlConstructOverflow(sql_cmd_base, asset_codes);
	sql_cmd += sql_cmd_tmp;

	// SQL - end
	sql_cmd += R"( ) as tb ORDER BY id ASC LIMIT )" + to_string(blksize);

	logSQL("ReadingsFetch", sql_cmd.c_str());
	sqlite3_stmt *stmt;
	// Prepare the SQL statement and get the result set
	rc = sqlite3_prepare_v2(dbHandle, sql_cmd.c_str(),-1,&stmt,NULL);
	if (rc != SQLITE_OK)
	{
		raiseError("retrieve", sqlite3_errmsg(dbHandle));
		// Failure
		return false;
	}
	else
	{
		// Call result set mapping
		// NOTE(review): assumes mapResultSet always writes rowsCount — confirm; it is read uninitialised otherwise
		rc = mapResultSet(stmt, resultSet, &rowsCount);

		if (rowsCount == 0)
		{
			// If no data were processed, it verifies if there are data having id above the current searched window
			minGlobalId = readCatalogue->getMinGlobalId(this->getDbHandle());
			idWindow = id + blksize;

			if (idWindow < minGlobalId)
			{
				id = minGlobalId;
				// Delete result set
				sqlite3_finalize(stmt);

				// Generate a single SQL statement that using a set of UNION considers all the readings table in handling
				// NOTE(review): this retry path only unions the per-asset tables, not the overflow tables — confirm intended
				{
					// SQL - start
					sql_cmd = R"( SELECT id, asset_code, reading, strftime('%Y-%m-%d %H:%M:%S', user_ts, 'utc') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, strftime('%Y-%m-%d %H:%M:%f', ts, 'utc') AS ts FROM ( )";

					// SQL - union of all the readings tables
					string sql_cmd_base;
					string sql_cmd_tmp;
					sql_cmd_base = " SELECT id, \"_assetcode_\" asset_code, reading, user_ts, ts FROM _dbname_._tablename_ WHERE id >= " + to_string(id) + " and id <= " + to_string(id) + " + " + to_string(blksize) + " ";
					sql_cmd_tmp = readCatalogue->sqlConstructMultiDb(sql_cmd_base, asset_codes);
					sql_cmd += sql_cmd_tmp;

					// SQL - end
					sql_cmd += R"( ) as tb ORDER BY id ASC LIMIT )" + to_string(blksize);
				}
				logSQL("ReadingsFetch", sql_cmd.c_str());
				// Prepare the SQL statement and get the result set
				rc = sqlite3_prepare_v2(dbHandle, sql_cmd.c_str(),-1,&stmt,NULL);
				if (rc != SQLITE_OK)
				{
					raiseError("retrieve", sqlite3_errmsg(dbHandle));
					// Failure
					return false;
				}
				// Call result set mapping
				rc = mapResultSet(stmt, resultSet, &rowsCount);
				if (rowsCount != 0)
				{
					Logger::getLogger()->debug("%s - following extractions, data extracted from the id :%lu:", __FUNCTION__, id);
				}
			}
		}
		// Delete result set
		sqlite3_finalize(stmt);

		// Check result set errors
		if (rc != SQLITE_DONE)
		{
			raiseError("retrieve", sqlite3_errmsg(dbHandle));
			// Failure
			return false;
		}
		else
		{
			// Success
			return true;
		}
	}
}
#endif

#ifndef SQLITE_SPLIT_READINGS
/**
 * Perform a query against the readings table
 *
 * retrieveReadings, used by the API, returns timestamp in utc unless
 * otherwise requested.
 *
 * @param condition	JSON query payload (empty string selects everything)
 * @param resultSet	Output: JSON result set
 * @return		true on success, false on error
 */
bool Connection::retrieveReadings(const string& condition, string& resultSet)
{
// Default template parameter uses UTF8 and MemoryPoolAllocator.
Document document;
	SQLBuffer sql;
	SQLBuffer sqlExtDummy;
	SQLBuffer sqlExt;
	SQLBuffer jsonConstraintsExt;
	// Extra constraints to add to where clause
	SQLBuffer jsonConstraints;
	bool isAggregate = false;
	bool isOptAggregate = false;
	const char *timezone = "utc";
	// NOTE(review): modifierExt and modifierInt are declared but never used in the visible body
	string modifierExt;
	string modifierInt;
	vector<string> asset_codes;

	// This connection has no readings storage at all; nothing can be retrieved
	if (m_noReadings)
	{
		Logger::getLogger()->error("Attempt to retrieve readings to plugin that has no storage for readings");
		return false;
	}

	ostringstream threadId;
	threadId << std::this_thread::get_id();

	ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance();
	if (readCatalogue)
	{
		// Attaches the required databases if the queue is not empty
		AttachDbSync *attachSync = AttachDbSync::getInstance();
		attachSync->lock();

		if ( ! m_NewDbIdList.empty())
		{
			readCatalogue->connectionAttachDbList(this->getDbHandle(), m_NewDbIdList);
		}
		attachSync->unlock();
	}
	else
	{
		// NOTE(review): "avialable" typo left as-is — runtime log string
		Logger::getLogger()->error("Readings catalogue not avialable");
	}

	try
	{
		if (dbHandle == NULL)
		{
			raiseError("retrieve", "No SQLite 3 db connection available");
			return false;
		}

		if (condition.empty())
		{
			// No condition: select every reading from every per-asset table and the overflow tables
			string sql_cmd;
			ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();

			// SQL - start
			sql_cmd = R"( SELECT id, asset_code, reading, strftime(')" F_DATEH24_SEC R"(', user_ts, 'utc') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, strftime(')" F_DATEH24_MS R"(', ts, 'utc') AS ts FROM ( )";

			// SQL - union of all the readings tables
			string sql_cmd_base;
			string sql_cmd_tmp;
			sql_cmd_base = " SELECT id, \"_assetcode_\" asset_code, reading, user_ts, ts FROM _dbname_._tablename_ ";
			sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, asset_codes);
			sql_cmd += sql_cmd_tmp;

			sql_cmd_base = " SELECT id, asset_code, reading, user_ts, ts FROM _dbname_._tablename_ ";
			sql_cmd_tmp = readCatalogue->sqlConstructOverflow(sql_cmd_base, asset_codes);
			sql_cmd += sql_cmd_tmp;

			// SQL - end
			sql_cmd += R"( ) as tb; )";
			sql.append(sql_cmd.c_str());
		}
		else
		{
			if (document.Parse(condition.c_str()).HasParseError())
			{
				raiseError("retrieve", "Failed to parse JSON payload");
				return false;
			}

			if (document.HasMember("timezone") && document["timezone"].IsString())
			{
				timezone = document["timezone"].GetString();
			}

			// timebucket aggregate all datapoints
			if (aggregateAll(document))
			{
				return aggregateQuery(document, resultSet);
			}

			if (document.HasMember("aggregate"))
			{
				isAggregate = true;
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				// Generates the SQL for the external query
				if (!jsonAggregates(document, document["aggregate"], sql, jsonConstraints, isOptAggregate, true, true))
				{
					return false;
				}
				// Generates the SQL for the internal query
				if (isOptAggregate)
				{
					if (!jsonAggregates(document, document["aggregate"], sqlExt, jsonConstraintsExt, isOptAggregate, true, false))
					{
						return false;
					}
				}
				sql.append(" FROM ");
			}
			else if (document.HasMember("return"))
			{
				// Explicit column list requested by the caller
				int col = 0;
				Value& columns = document["return"];
				if (! columns.IsArray())
				{
					raiseError("retrieve", "The property return must be an array");
					return false;
				}
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr)
				{
					if (col)
						sql.append(", ");
					if (!itr->IsObject())	// Simple column name
					{
						if (strcmp(itr->GetString() ,"user_ts") == 0)
						{
							// Display without TZ expression and microseconds also
							sql.append(" strftime('" F_DATEH24_SEC "', user_ts, '");
							sql.append(timezone);
							sql.append("') ");
							sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
							sql.append(" as user_ts ");
						}
						else if (strcmp(itr->GetString() ,"ts") == 0)
						{
							// Display without TZ expression and microseconds also
							sql.append(" strftime('" F_DATEH24_MS "', ts, '");
							sql.append(timezone);
							sql.append("') ");
							sql.append(" as ts ");
						}
						else
						{
							sql.append(itr->GetString());
						}
					}
					else
					{
						// Column descriptor object: column/format/timezone/json/alias
						if (itr->HasMember("column"))
						{
							if (! (*itr)["column"].IsString())
							{
								raiseError("retrieve", "column must be a string");
								return false;
							}
							if (itr->HasMember("format"))
							{
								if (! (*itr)["format"].IsString())
								{
									raiseError("retrieve", "format must be a string");
									return false;
								}
								// SQLite 3 date format.
								string new_format;
								applyColumnDateFormatLocaltime((*itr)["format"].GetString(), (*itr)["column"].GetString(), new_format, true);
								// Add the formatted column or use it as is
								sql.append(new_format);
							}
							else if (itr->HasMember("timezone"))
							{
								if (! (*itr)["timezone"].IsString())
								{
									raiseError("retrieve", "timezone must be a string");
									return false;
								}
								// SQLite3 doesnt support time zone formatting
								const char *tz = (*itr)["timezone"].GetString();

								if (strncasecmp(tz, "utc", 3) == 0)
								{
									if (strcmp((*itr)["column"].GetString() ,"user_ts") == 0)
									{
										// Extract milliseconds and microseconds for the user_ts fields
										sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'utc') ");
										sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
										if (! itr->HasMember("alias"))
										{
											sql.append(" AS ");
											sql.append((*itr)["column"].GetString());
										}
									}
									else
									{
										sql.append("strftime('" F_DATEH24_MS "', ");
										sql.append((*itr)["column"].GetString());
										sql.append(", 'utc')");
										if (! itr->HasMember("alias"))
										{
											sql.append(" AS ");
											sql.append((*itr)["column"].GetString());
										}
									}
								}
								else if (strncasecmp(tz, "localtime", 9) == 0)
								{
									if (strcmp((*itr)["column"].GetString() ,"user_ts") == 0)
									{
										// Extract milliseconds and microseconds for the user_ts fields
										sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'localtime') ");
										sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
										if (! itr->HasMember("alias"))
										{
											sql.append(" AS ");
											sql.append((*itr)["column"].GetString());
										}
									}
									else
									{
										sql.append("strftime('" F_DATEH24_MS "', ");
										sql.append((*itr)["column"].GetString());
										sql.append(", 'localtime')");
										if (! itr->HasMember("alias"))
										{
											sql.append(" AS ");
											sql.append((*itr)["column"].GetString());
										}
									}
								}
								else
								{
									raiseError("retrieve", "SQLite3 plugin does not support timezones in queries");
									return false;
								}
							}
							else
							{
								// No explicit format/timezone: use the request level timezone
								if (strcmp((*itr)["column"].GetString() ,"user_ts") == 0)
								{
									// Extract milliseconds and microseconds for the user_ts fields
									sql.append("strftime('" F_DATEH24_SEC "', user_ts, '");
									sql.append(timezone);
									sql.append("') ");
									sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
									if (! itr->HasMember("alias"))
									{
										sql.append(" AS ");
										sql.append((*itr)["column"].GetString());
									}
								}
								else
								{
									sql.append("strftime('" F_DATEH24_MS "', ");
									sql.append((*itr)["column"].GetString());
									sql.append(", '");
									sql.append(timezone);
									sql.append("')");
									if (! itr->HasMember("alias"))
									{
										sql.append(" AS ");
										sql.append((*itr)["column"].GetString());
									}
								}
							}
							sql.append(' ');
						}
						else if (itr->HasMember("json"))
						{
							const Value& json = (*itr)["json"];
							if (! returnJson(json, sql, jsonConstraints))
								return false;
						}
						else
						{
							raiseError("retrieve", "return object must have either a column or json property");
							return false;
						}

						if (itr->HasMember("alias"))
						{
							sql.append(" AS \"");
							sql.append((*itr)["alias"].GetString());
							sql.append('"');
						}
					}
					col++;
				}
				sql.append(" FROM ");
			}
			else
			{
				// Default column set
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				sql.append("id, asset_code, reading, strftime('" F_DATEH24_SEC "', user_ts, '");
				sql.append(timezone);
				sql.append("') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, strftime('" F_DATEH24_MS "', ts, '");
				sql.append(timezone);
				sql.append("') AS ts FROM ");
			}
			{
				// Identifies the asset_codes used in the query
				if (document.HasMember("where"))
				{
					jsonWhereClause(document["where"], sqlExtDummy, asset_codes);
				}

				string sql_cmd;
				ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance();

				// SQL - start
				sql_cmd = R"( ( )";

				// SQL - union of all the readings tables
				string sql_cmd_base;
				string sql_cmd_overflow_base;
				string sql_cmd_tmp;
				// Specific optimization for the count operation
				if (isOptAggregate)
				{
					const char *queryTmp = sqlExt.coalesce();
					sql_cmd_base = " SELECT ";
					sql_cmd_base += queryTmp;

					if (! strstr(queryTmp, "ROWID"))
						sql_cmd_base += ", ROWID";
					if (! strstr(queryTmp, "asset_code"))
						sql_cmd_base += ", asset_code";

					sql_cmd_base += ", id, reading, user_ts, ts ";
					sql_cmd_overflow_base = sql_cmd_base;
					// NOTE(review): ".assetcode." appears to be a placeholder substituted later by the catalogue helpers — confirm
					StringReplaceAll (sql_cmd_base, "asset_code", " \"_assetcode_\" .assetcode. ");
					sql_cmd_base += " FROM _dbname_._tablename_ ";
					sql_cmd_overflow_base += " FROM _dbname_._tablename_ ";
					delete[] queryTmp;
				}
				else
				{
					sql_cmd_base = " SELECT ROWID, id, \"_assetcode_\" asset_code, reading, user_ts, ts FROM _dbname_._tablename_ ";
					sql_cmd_overflow_base = " SELECT ROWID, id, asset_code, reading, user_ts, ts FROM _dbname_._tablename_ ";
				}

				sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, asset_codes);
				sql_cmd += sql_cmd_tmp;

				sql_cmd_tmp = readCatalogue->sqlConstructOverflow(sql_cmd_overflow_base, asset_codes, false, isOptAggregate);
				sql_cmd += sql_cmd_tmp;

				// SQL - end
				sql_cmd += R"( ) as readings_1 )";
				sql.append(sql_cmd.c_str());
			}
			if (document.HasMember("where"))
			{
				sql.append(" WHERE ");
				// NOTE(review): this inner HasMember("where") re-check is redundant — the outer if already guarantees it
				if (document.HasMember("where"))
				{
					if (!jsonWhereClause(document["where"], sql, asset_codes))
					{
						return false;
					}
				}
				else
				{
					raiseError("retrieve", "JSON does not contain where clause");
					return false;
				}
				if (! jsonConstraints.isEmpty())
				{
					sql.append(" AND ");
					const char *jsonBuf = jsonConstraints.coalesce();
					sql.append(jsonBuf);
					delete[] jsonBuf;
				}
			}
			else if (isAggregate)
			{
				/*
				 * Performance improvement: force sqlite to use an index
				 * if we are doing an aggregate and have no where clause.
*/ sql.append(" WHERE id = id"); } if (!jsonModifiers(document, sql, true)) { return false; } } sql.append(';'); const char *query = sql.coalesce(); int rc; sqlite3_stmt *stmt; logSQL("ReadingsRetrieve", query); // Prepare the SQL statement and get the result set rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); // Release memory for 'query' var delete[] query; if (rc != SQLITE_OK) { raiseError("retrieve", sqlite3_errmsg(dbHandle)); return false; } // Call result set mapping rc = mapResultSet(stmt, resultSet); // Delete result set sqlite3_finalize(stmt); // Check result set mapping errors if (rc != SQLITE_DONE) { raiseError("retrieve", sqlite3_errmsg(dbHandle)); // Failure return false; } // Success return true; } catch (exception e) { raiseError("retrieve", "Internal error: %s", e.what()); return false; } } #endif #ifndef SQLITE_SPLIT_READINGS /** * Purge readings from the reading table */ unsigned int Connection::purgeReadings(unsigned long age, unsigned int flags, unsigned long sent, std::string& result) { long unsentPurged = 0; long unsentRetained = 0; long numReadings = 0; unsigned long rowidLimit = 0, minrowidLimit = 0, maxrowidLimit = 0, rowidMin; struct timeval startTv, endTv; int blocks = 0; bool flag_retain; char *zErrMsg = NULL; vector<string> assetCodes; if (m_noReadings) { Logger::getLogger()->error("Attempt to purge readings from plugin that has no storage for readings"); return 0; } Logger *logger = Logger::getLogger(); ostringstream threadId; threadId << std::this_thread::get_id(); ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance(); { // Attaches the needed databases if the queue is not empty AttachDbSync *attachSync = AttachDbSync::getInstance(); attachSync->lock(); if ( ! 
m_NewDbIdList.empty()) { readCatalogue->connectionAttachDbList(this->getDbHandle(), m_NewDbIdList); } attachSync->unlock(); } flag_retain = false; if ( (flags & STORAGE_PURGE_RETAIN_ANY) || (flags & STORAGE_PURGE_RETAIN_ALL) ) { flag_retain = true; } Logger::getLogger()->debug("%s - flags %X flag_retain %d sent :%ld:", __FUNCTION__, flags, flag_retain, sent); // Prepare empty result result = "{ \"removed\" : 0, "; result += " \"unsentPurged\" : 0, "; result += " \"unsentRetained\" : 0, "; result += " \"readings\" : 0, "; result += " \"method\" : \"rows\", "; result += " \"duration\" : 0 }"; logger->info("Purge starting..."); gettimeofday(&startTv, NULL); /* * We fetch the current rowid and limit the purge process to work on just * those rows present in the database when the purge process started. * This provents us looping in the purge process if new readings become * eligible for purging at a rate that is faster than we can purge them. */ string sql_cmd; string sql_cmd_tmp; // Generate a single SQL statement that using a set of UNION considers all the readings table in handling // SQL - start sql_cmd = R"( SELECT MAX(rowid) FROM ( )"; // SQL - union of all the readings tables string sql_cmd_base = " SELECT MAX(rowid) rowid FROM _dbname_._tablename_ "; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; int rc = SQLexec(dbHandle, "readings", sql_cmd.c_str(), rowidCallback, &rowidLimit, &zErrMsg); if (rc != SQLITE_OK) { raiseError("purge - phase 0, fetching rowid limit ", zErrMsg); sqlite3_free(zErrMsg); return 0; } maxrowidLimit = rowidLimit; Logger::getLogger()->debug("purgeReadings rowidLimit %lu", rowidLimit); // Generate a single SQL statement that using a set of UNION considers all the readings table in handling // 
SQL - start sql_cmd = R"( SELECT MIN(rowid) FROM ( )"; // SQL - union of all the readings tables sql_cmd_base = " SELECT MIN(rowid) rowid FROM _dbname_._tablename_ "; sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes, true); sql_cmd += sql_cmd_tmp; sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes, true); sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; Logger::getLogger()->debug("%s - SELECT MIN - '%s'", __FUNCTION__, sql_cmd.c_str() ); rc = SQLexec(dbHandle, "readings", sql_cmd.c_str(), rowidCallback, &minrowidLimit, &zErrMsg); if (rc != SQLITE_OK) { raiseError("purge - phaase 0, fetching minrowid limit ", zErrMsg); sqlite3_free(zErrMsg); return 0; } Logger::getLogger()->debug("purgeReadings minrowidLimit %lu", minrowidLimit); if (age == 0) { /* * An age of 0 means remove the oldest hours data. * So set age based on the data we have and continue. */ string sql_cmd; // Generate a single SQL statement that using a set of UNION considers all the readings table in handling // SQL - start sql_cmd = R"( SELECT (strftime('%s','now', 'utc') - strftime('%s', MIN(user_ts)))/360 FROM ( )"; // SQL - union of all the readings tables string sql_cmd_base; string sql_cmd_tmp; sql_cmd_base = " SELECT MIN(user_ts) user_ts FROM _dbname_._tablename_ WHERE rowid <= " + to_string(rowidLimit); ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes, true); sql_cmd += sql_cmd_tmp; sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes, true); sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; SQLBuffer oldest; oldest.append(sql_cmd); oldest.append(';'); const char *query = oldest.coalesce(); char *zErrMsg = NULL; int rc; int purge_readings = 0; // Exec query and get result in 'purge_readings' via 'selectCallback' rc = SQLexec(dbHandle, "readings", query, selectCallback, &purge_readings, &zErrMsg); // Release memory for 
'query' var delete[] query; if (rc == SQLITE_OK) { age = purge_readings; } else { raiseError("purge - phase 1", zErrMsg); sqlite3_free(zErrMsg); return 0; } Logger::getLogger()->debug("purgeReadings purge_readings %d age %d", purge_readings, age); } Logger::getLogger()->debug("%s - rowidLimit :%lu: maxrowidLimit :%lu: maxrowidLimit :%lu: age :%lu:", __FUNCTION__, rowidLimit, maxrowidLimit, minrowidLimit, age); { /* * Refine rowid limit to just those rows older than age hours. */ char *zErrMsg = NULL; int rc; unsigned long l = minrowidLimit; unsigned long r; if (flag_retain) { r = min(sent, rowidLimit); } else { r = rowidLimit; } r = max(r, l); logger->debug ("%s line %d - l=%u, r=%u, sent=%u, rowidLimit=%u, minrowidLimit=%u, flag_retain=%u", __FUNCTION__, __LINE__, l, r, sent, rowidLimit, minrowidLimit, flag_retain); if (l == r) { logger->info("No data to purge: min_id == max_id == %u", minrowidLimit); return 0; } unsigned long m=l; while (l <= r) { unsigned long midRowId = 0; unsigned long prev_m = m; m = l + (r - l) / 2; if (prev_m == m) break; // e.g. 
select id from readings where rowid = 219867307 AND user_ts < datetime('now' , '-24 hours', 'utc'); string sql_cmd; // Generate a single SQL statement that using a set of UNION considers all the readings table in handling { // SQL - start // MIN is used to ensure just 1 row is returned sql_cmd = R"( select id FROM ( )"; // SQL - union of all the readings tables string sql_cmd_base; string sql_cmd_tmp; sql_cmd_base = " SELECT id FROM _dbname_._tablename_ WHERE rowid = " + to_string(m) + " AND user_ts < datetime('now' , '-" +to_string(age) + " hours')"; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; } SQLBuffer sqlBuffer; sqlBuffer.append(sql_cmd); sqlBuffer.append(';'); const char *query = sqlBuffer.coalesce(); rc = SQLexec(dbHandle, "readings", query, rowidCallback, &midRowId, &zErrMsg); delete[] query; if (rc != SQLITE_OK) { raiseError("purge - phase 1, fetching midRowId ", zErrMsg); sqlite3_free(zErrMsg); return 0; } if (midRowId == 0) // mid row doesn't satisfy given condition for user_ts, so discard right/later half and look in left/earlier half { // search in earlier/left half r = m - 1; // The m position should be skipped as midRowId is 0 m = r; } else //if (l != m) { // search in later/right half l = m + 1; } } rowidLimit = m; Logger::getLogger()->debug("%s - rowidLimit :%lu: minrowidLimit :%lu: maxrowidLimit :%lu:", __FUNCTION__, rowidLimit, minrowidLimit, maxrowidLimit); if (minrowidLimit == rowidLimit) { logger->info("No data to purge"); return 0; } rowidMin = minrowidLimit; Logger::getLogger()->debug("%s - m :%lu: rowidMin :%lu: ",__FUNCTION__ ,m, rowidMin); } //logger->info("Purge collecting unsent row count"); if ( ! 
flag_retain ) { char *zErrMsg = NULL; int rc; int lastPurgedId; string sql_cmd; // Generate a single SQL statement that using a set of UNION considers all the readings table in handling { // SQL - start // MIN is used to ensure just 1 row is returned sql_cmd = R"( select id FROM ( )"; // SQL - union of all the readings tables string sql_cmd_base; string sql_cmd_tmp; sql_cmd_base = " SELECT id FROM _dbname_._tablename_ WHERE rowid = " + to_string(rowidLimit) + " "; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; } SQLBuffer idBuffer; idBuffer.append(sql_cmd); idBuffer.append(';'); const char *idQuery = idBuffer.coalesce(); rc = SQLexec(dbHandle, "readings", idQuery, rowidCallback, &lastPurgedId, &zErrMsg); // Release memory for 'idQuery' var delete[] idQuery; if (rc != SQLITE_OK) { raiseError("purge - phase 0, fetching rowid limit ", zErrMsg); sqlite3_free(zErrMsg); return 0; } if (sent != 0 && lastPurgedId > sent) // Unsent readings will be purged { // Get number of unsent rows we are about to remove int unsent = rowidLimit - sent; unsentPurged = unsent; } Logger::getLogger()->debug("%s - lastPurgedId %d unsentPurged :%ld:",__FUNCTION__, lastPurgedId, unsentPurged); } if (m_writeAccessOngoing) { while (m_writeAccessOngoing) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); } } unsigned int deletedRows = 0; zErrMsg = NULL; unsigned long rowsAffected; unsigned int totTime=0, prevBlocks=0, prevTotTime=0; logger->info("Purge about to delete readings # %ld to %ld", rowidMin, rowidLimit); while (rowidMin < rowidLimit) { blocks++; rowidMin += purgeBlockSize; if (rowidMin > rowidLimit) { rowidMin = rowidLimit; } SQLBuffer sql; sql.append("DELETE FROM _dbname_._tablename_ WHERE rowid <= "); sql.append(rowidMin); 
sql.append(" AND user_ts < datetime('now' , '-" +to_string(age) + " hours')"); sql.append(';'); const char *query = sql.coalesce(); logSQL("ReadingsPurge", query); int rc; { //unique_lock<mutex> lck(db_mutex); // if (m_writeAccessOngoing) db_cv.wait(lck); START_TIME; rc = readCat->purgeAllReadings(dbHandle, query ,&zErrMsg, &rowsAffected); END_TIME; logger->debug("%s - DELETE sql '%s' rowsAffected :%ld:", __FUNCTION__, query ,rowsAffected); // Release memory for 'query' var delete[] query; totTime += usecs; if(usecs > 150000) { std::this_thread::sleep_for(std::chrono::milliseconds(100 + usecs/1000)); } } if (rc != SQLITE_OK) { raiseError("purge - phase 3", zErrMsg); sqlite3_free(zErrMsg); return 0; } // Get db changes deletedRows += rowsAffected; logger->debug("%s - Purge delete block #%d with %d readings", __FUNCTION__, blocks, rowsAffected); if(blocks % RECALC_PURGE_BLOCK_SIZE_NUM_BLOCKS == 0) { int prevAvg = prevTotTime/(prevBlocks?prevBlocks:1); int currAvg = (totTime-prevTotTime)/(blocks-prevBlocks); int avg = ((prevAvg?prevAvg:currAvg)*5 + currAvg*5) / 10; // 50% weightage for long term avg and 50% weightage for current avg prevBlocks = blocks; prevTotTime = totTime; int deviation = abs(avg - TARGET_PURGE_BLOCK_DEL_TIME); logger->debug("blocks=%d, totTime=%d usecs, prevAvg=%d usecs, currAvg=%d usecs, avg=%d usecs, TARGET_PURGE_BLOCK_DEL_TIME=%d usecs, deviation=%d usecs", blocks, totTime, prevAvg, currAvg, avg, TARGET_PURGE_BLOCK_DEL_TIME, deviation); if (deviation > TARGET_PURGE_BLOCK_DEL_TIME/10) { float ratio = (float)TARGET_PURGE_BLOCK_DEL_TIME / (float)avg; if (ratio > 2.0) ratio = 2.0; if (ratio < 0.5) ratio = 0.5; purgeBlockSize = (float)purgeBlockSize * ratio; purgeBlockSize = purgeBlockSize / PURGE_BLOCK_SZ_GRANULARITY * PURGE_BLOCK_SZ_GRANULARITY; if (purgeBlockSize < MIN_PURGE_DELETE_BLOCK_SIZE) purgeBlockSize = MIN_PURGE_DELETE_BLOCK_SIZE; if (purgeBlockSize > MAX_PURGE_DELETE_BLOCK_SIZE) purgeBlockSize = MAX_PURGE_DELETE_BLOCK_SIZE; 
logger->debug("Changed purgeBlockSize to %d", purgeBlockSize); } std::this_thread::sleep_for(std::chrono::milliseconds(100)); } Logger::getLogger()->debug("Purge delete block #%d with %d readings", blocks, rowsAffected); } while (rowidMin < rowidLimit); unsentRetained = maxrowidLimit - rowidLimit; numReadings = maxrowidLimit +1 - minrowidLimit - deletedRows; if (sent == 0) // Special case when not north process is used { unsentPurged = deletedRows; } if (deletedRows) { std::thread th(&ReadingsCatalogue::loadEmptyAssetReadingCatalogue,ReadingsCatalogue::getInstance(),false); th.detach(); } gettimeofday(&endTv, NULL); unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; ostringstream convert; convert << "{ \"removed\" : " << deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; convert << " \"readings\" : " << numReadings << ", "; convert << " \"method\" : \"age\", "; convert << " \"duration\" : " << duration << " }"; result = convert.str(); logger->info("Purge process complete in %d blocks in %lduS", blocks, duration); Logger::getLogger()->debug("%s - age :%lu: flag_retain :%x: sent :%lu: result '%s'", __FUNCTION__, age, flags, flag_retain, result.c_str() ); return deletedRows; } #endif #ifndef SQLITE_SPLIT_READINGS /** * Purge readings from the reading table */ unsigned int Connection::purgeReadingsByRows(unsigned long rows, unsigned int flags, unsigned long sent, std::string& result) { unsigned long deletedRows = 0, unsentPurged = 0, unsentRetained = 0, numReadings = 0; unsigned long limit = 0; string sql_cmd; vector<string> assetCodes; bool flag_retain; struct timeval startTv, endTv; // rowidCallback expects unsigned long unsigned long rowcount, minId, maxId; unsigned long rowsAffected; unsigned long deletePoint; char *zErrMsg = NULL; int rc; Logger *logger = Logger::getLogger(); if (m_noReadings) { logger->error("Attempt to 
purge readings from plugin that has no storage for readings"); return 0; } gettimeofday(&startTv, NULL); ostringstream threadId; threadId << std::this_thread::get_id(); ReadingsCatalogue *readCatalogue = ReadingsCatalogue::getInstance(); { // Attaches the needed databases if the queue is not empty AttachDbSync *attachSync = AttachDbSync::getInstance(); attachSync->lock(); if ( ! m_NewDbIdList.empty()) { readCatalogue->connectionAttachDbList(this->getDbHandle(), m_NewDbIdList); } attachSync->unlock(); } flag_retain = false; if ( (flags & STORAGE_PURGE_RETAIN_ANY) || (flags & STORAGE_PURGE_RETAIN_ALL) ) { flag_retain = true; } Logger::getLogger()->debug("%s - flags %X flag_retain %d sent :%ld:", __FUNCTION__, flags, flag_retain, sent); logger->info("Purge by Rows called"); if (flag_retain) { limit = sent; logger->info("Sent is %lu", sent); } logger->info("Purge by Rows called with flag_retain %d, rows %lu, limit %lu", flag_retain, rows, limit); rowsAffected = 0; // Don't save unsent rows { // Calc rowcount // Generate a single SQL statement that using a set of UNION considers all the readings table in handling { // SQL - start // MIN is used to ensure just 1 row is returned sql_cmd = R"( SELECT SUM(rowid) FROM ( )"; // SQL - union of all the readings tables string sql_cmd_base; string sql_cmd_tmp; sql_cmd_base = " select count(rowid) rowid FROM _dbname_._tablename_ "; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; } rc = SQLexec(dbHandle, "readings", sql_cmd.c_str(), rowidCallback, &rowcount, &zErrMsg); if (rc != SQLITE_OK) { raiseError("purge - phaase 0, fetching row count", zErrMsg); sqlite3_free(zErrMsg); return 0; } } { // Calc maxId // Generate a single SQL statement that using a set of UNION considers all the 
readings table in handling { // SQL - start // MIN is used to ensure just 1 row is returned sql_cmd = R"( SELECT MAX(id) FROM ( )"; // SQL - union of all the readings tables string sql_cmd_base; string sql_cmd_tmp; sql_cmd_base = " SELECT MAX(id) id FROM _dbname_._tablename_ "; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes); sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; } rc = SQLexec(dbHandle, "readings", sql_cmd.c_str(), rowidCallback, &maxId, &zErrMsg); if (rc != SQLITE_OK) { raiseError("purge - phaase 0, fetching maximum id", zErrMsg); sqlite3_free(zErrMsg); return 0; } } numReadings = rowcount; rowsAffected = 0; do { if (rowcount <= rows) { logger->info("Row count %d is less than required rows %d", rowcount, rows); break; } { // Calc minId // Generate a single SQL statement that using a set of UNION considers all the readings table in handling { // SQL - start // MIN is used to ensure just 1 row is returned sql_cmd = R"( SELECT MIN(rowid) FROM ( )"; // SQL - union of all the readings tables string sql_cmd_base; string sql_cmd_tmp; sql_cmd_base = " SELECT MIN(rowid) rowid FROM _dbname_._tablename_ "; ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); sql_cmd_tmp = readCat->sqlConstructMultiDb(sql_cmd_base, assetCodes, true); sql_cmd += sql_cmd_tmp; sql_cmd_tmp = readCat->sqlConstructOverflow(sql_cmd_base, assetCodes, true); sql_cmd += sql_cmd_tmp; // SQL - end sql_cmd += R"( ) as readings_1 )"; logger->debug("%s - SELECT MIN - sql_cmd '%s' ", __FUNCTION__, sql_cmd.c_str() ); } rc = SQLexec(dbHandle, "readings", sql_cmd.c_str(), rowidCallback, &minId, &zErrMsg); if (rc != SQLITE_OK) { raiseError("purge - phaase 0, fetching minimum id", zErrMsg); sqlite3_free(zErrMsg); return 0; } } unsigned long deletePoint = minId + min(100000UL, rows); if (maxId 
- deletePoint < rows || deletePoint > maxId) deletePoint = maxId - rows; // Do not delete if (flag_retain) { if (limit < deletePoint) { deletePoint = limit; } } SQLBuffer sql; logger->info("RowCount %lu, Max Id %lu, min Id %lu, delete point %lu", rowcount, maxId, minId, deletePoint); sql.append("DELETE FROM _dbname_._tablename_ WHERE id <= "); sql.append(deletePoint); const char *query = sql.coalesce(); { ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); //unique_lock<mutex> lck(db_mutex); // if (m_writeAccessOngoing) db_cv.wait(lck); // Exec DELETE query: no callback, no resultset rc = readCat->purgeAllReadings(dbHandle, query ,&zErrMsg, &rowsAffected); logger->info("%s - DELETE - query '%s' rowsAffected :%ld:", __FUNCTION__, query ,rowsAffected); deletedRows += rowsAffected; numReadings -= rowsAffected; rowcount -= rowsAffected; sqlite3_free(zErrMsg); // Release memory for 'query' var delete[] query; logger->debug(" Deleted :%lu: rows", rowsAffected); if (rowsAffected == 0) { break; } if (limit != 0 && sent != 0) { unsentPurged = deletePoint - sent; } else if (!limit) { unsentPurged += rowsAffected; } } std::this_thread::sleep_for(std::chrono::milliseconds(1)); } while (rowcount > rows); if (limit) { unsentRetained = numReadings - rows; } if (deletedRows) { std::thread th(&ReadingsCatalogue::loadEmptyAssetReadingCatalogue,ReadingsCatalogue::getInstance(),false); th.detach(); } gettimeofday(&endTv, NULL); unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; ostringstream convert; convert << "{ \"removed\" : " << deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; convert << " \"readings\" : " << numReadings << ", "; convert << " \"method\" : \"rows\", "; convert << " \"duration\" : " << duration << " }"; result = convert.str(); Logger::getLogger()->debug("%s - Purge by Rows complete - rows :%lu: flag :%x: sent 
:%lu: numReadings :%lu: rowsAffected :%u: result '%s'", __FUNCTION__, rows, flags, sent, numReadings, rowsAffected, result.c_str() ); return deletedRows; } #endif /** * SQLIte wrapper to retry statements when the database error occurs * */ int Connection::SQLPrepare(sqlite3 *dbHandle, const char *sqlCmd, sqlite3_stmt **readingsStmt) { int retries = 0, rc; do { rc = sqlite3_prepare_v2(dbHandle, sqlCmd, -1, readingsStmt, NULL); if (rc != SQLITE_OK) { if (retries >= LOG_AFTER_NERRORS){ Logger::getLogger()->warn("SQLPrepare - error '%s' dbHandle %X sqlCmd '%s' retry %d of %d", sqlite3_errmsg(dbHandle), dbHandle, sqlCmd, rc, MAX_RETRIES); } retries++; int interval = (retries * RETRY_BACKOFF); usleep(interval); // sleep retries milliseconds } } while (retries < MAX_RETRIES && (rc != SQLITE_OK)); if (rc != SQLITE_OK) { Logger::getLogger()->error("SQLPrepare - Database error after maximum retries"); } return rc; } /** * Purge readings by asset or purge all readings * * @param asset The asset name to purge * If empty all assets will be removed * @return The number of removed asset records */ unsigned int Connection::purgeReadingsAsset(const string& asset) { char *zErrMsg = NULL; int rc; sqlite3_stmt *stmt; if (m_noReadings) { return 0; } ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); if (readCat == NULL) { return 0; } if (asset.empty()) { SQLBuffer sql; unsigned long rowsAffected; sql.append("DELETE FROM _dbname_._tablename_;"); const char *query = sql.coalesce(); logSQL("ReadingsAssetPurge", query); if (m_writeAccessOngoing) { while (m_writeAccessOngoing) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); } } // Purge all readings data in all db tables // zErrMsg is freed if rc != SQLITE_OK) rc = readCat->purgeAllReadings(dbHandle, query ,&zErrMsg, &rowsAffected); // Release memory for 'query' var delete[] query; if (rc != SQLITE_OK) { raiseError("ReadingsAssetPurge", sqlite3_errmsg(dbHandle)); sqlite3_free(zErrMsg); return 0; } return 
rowsAffected; } else { ReadingsCatalogue::tyReadingReference ref; ref = readCat->getReadingReference(this, asset.c_str()); string dbReadingsName; string dbName; string query = "DELETE FROM " + readCat->generateDbName(ref.dbId); query += "." + readCat->generateReadingsName(ref.dbId, ref.tableId) + ";"; // Execute SQL statement via SQLExec wrapper rc = readCat->SQLExec(dbHandle, query.c_str(), &zErrMsg); if (rc != SQLITE_OK) { raiseError("ReadingsAssetPurge", sqlite3_errmsg(dbHandle)); sqlite3_free(zErrMsg); return 0; } readCat->loadEmptyAssetReadingCatalogue(); // Get numbwer of affected rows return (unsigned int)sqlite3_changes(dbHandle); } } ================================================ FILE: C/plugins/storage/sqlite/common/readings_catalogue.cpp ================================================ /* * Fledge storage service - Readings catalogue handling * * Copyright (c) 2020 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli, Massimiliano Pinto */ #include <vector> #include <algorithm> #include <utils.h> #include <sys/stat.h> #include <libgen.h> #include <string_utils.h> #include <connection.h> #include <connection_manager.h> #include <sqlite_common.h> #include "readings_catalogue.h" #include <purge_configuration.h> #include "json_utils.h" using namespace std; using namespace rapidjson; // Log set, clear and get of transaction boundaries #define LOG_TX_BOUNDARIES 0 /** * Constructor * * This is never explicitly called as the ReadingsCatalogue is a * singleton class. */ ReadingsCatalogue::ReadingsCatalogue() : m_nextOverflow(1), m_maxOverflowUsed(0) { } /** * Logs an error. A variable argument function that * uses a printf format string to log an error message with the * associated operation. * * @param operation The operastion in progress * @param reason A printf format string with the error message text */ void ReadingsCatalogue::raiseError(const char *operation, const char *reason, ...) 
{ char tmpbuf[512]; va_list ap; va_start(ap, reason); vsnprintf(tmpbuf, sizeof(tmpbuf), reason, ap); va_end(ap); Logger::getLogger()->error("ReadingsCatalogues: %s during operation %s", tmpbuf, operation); } /** * Retrieve the information from the persistent storage: * global id * last created database * * @param dbHandle Database connection to use for the operations * */ bool ReadingsCatalogue::configurationRetrieve(sqlite3 *dbHandle) { string sql_cmd; int rc; int id; int nCols; sqlite3_stmt *stmt; // Retrieves the global_id from thd DB sql_cmd = " SELECT global_id, db_id_Last, n_readings_per_db, n_db_preallocate FROM " READINGS_DB ".configuration_readings "; if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK) { raiseError("configurationRetrieve", sqlite3_errmsg(dbHandle)); return false; } if (SQLStep(stmt) != SQLITE_ROW) { m_ReadingsGlobalId = 1; m_dbIdLast = 0; m_storageConfigCurrent.nReadingsPerDb = m_storageConfigApi.nReadingsPerDb; m_storageConfigCurrent.nDbPreallocate = m_storageConfigApi.nDbPreallocate; sql_cmd = " INSERT INTO " READINGS_DB ".configuration_readings VALUES (" + to_string(m_ReadingsGlobalId) + "," + to_string(m_dbIdLast) + "," + to_string(m_storageConfigCurrent.nReadingsPerDb) + "," + to_string(m_storageConfigCurrent.nDbPreallocate) + ")"; if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK) { raiseError("configurationRetrieve", sqlite3_errmsg(dbHandle)); return false; } } else { nCols = sqlite3_column_count(stmt); m_ReadingsGlobalId = sqlite3_column_int64(stmt, 0); m_dbIdLast = sqlite3_column_int(stmt, 1); m_storageConfigCurrent.nReadingsPerDb = sqlite3_column_int(stmt, 2); m_storageConfigCurrent.nDbPreallocate = sqlite3_column_int(stmt, 3); } Logger::getLogger()->debug("configurationRetrieve: ReadingsGlobalId %lu dbIdLast %d ", m_ReadingsGlobalId.load(), m_dbIdLast); sqlite3_finalize(stmt); return true; } /** * Retrieves the global id stored in SQLite and if it is not possible * it calculates the value from the 
readings tables executing a max(id) on each table.
 *
 * Once retrieved or calculated,
 * It updates the value into SQLite to -1 to force a calculation at the next plugin init (Fledge starts)
 * in the case the proper value was not stored as the plugin shutdown (when Fledge is stopped) was not called.
 *
 * @return	True on success, false on a database error
 */
bool ReadingsCatalogue::evaluateGlobalId ()
{
	string sql_cmd;
	int rc;
	long id;
	int nCols;
	sqlite3_stmt *stmt;
	sqlite3 *dbHandle;

	ConnectionManager *manager = ConnectionManager::getInstance();
	Connection *connection = manager->allocate();
#if TRACK_CONNECTION_USER
	string usage = "Evaluate Global ID";
	connection->setUsage(usage);
#endif
	dbHandle = connection->getDbHandle();

	// Retrieves the global_id from the DB
	sql_cmd = " SELECT global_id FROM " READINGS_DB ".configuration_readings ";

	if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK)
	{
		raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle));
		manager->release(connection);
		return false;
	}

	if (SQLStep(stmt) != SQLITE_ROW)
	{
		// No configuration row yet: seed one starting from id 1
		m_ReadingsGlobalId = 1;

		sql_cmd = " INSERT INTO " READINGS_DB ".configuration_readings VALUES (" + to_string(m_ReadingsGlobalId) + "," + "0" + "," + to_string(m_storageConfigApi.nReadingsPerDb) + "," + to_string(m_storageConfigApi.nDbPreallocate) + ")";
		if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK)
		{
			raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle));
			manager->release(connection);
			return false;
		}
	}
	else
	{
		nCols = sqlite3_column_count(stmt);
		m_ReadingsGlobalId = sqlite3_column_int64(stmt, 0);
	}
	id = m_ReadingsGlobalId;
	Logger::getLogger()->debug("evaluateGlobalId - global id from the DB %lu", id);

	// -1 is the sentinel meaning "not stored at shutdown": recalculate
	// by scanning max(id) over all the readings tables
	if (m_ReadingsGlobalId == -1)
	{
		m_ReadingsGlobalId = calculateGlobalId (dbHandle);
	}

	id = m_ReadingsGlobalId;
	Logger::getLogger()->debug("evaluateGlobalId - global id from the DB %lu", id);

	// Set the global_id in the DB to -1 to force a calculation at the restart
	// in case the shutdown is not executed and the proper value stored
	sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET global_id=-1;";
	if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK)
	{
		raiseError("evaluateGlobalId", sqlite3_errmsg(dbHandle));
		manager->release(connection);
		return false;
	}

	sqlite3_finalize(stmt);
	manager->release(connection);

	return true;
}

/**
 * Stores the global id into SQLite
 *
 * @return	True on success, false on a database error
 */
bool ReadingsCatalogue::storeGlobalId ()
{
	string sql_cmd;
	int rc;
	int id;
	int nCols;
	sqlite3_stmt *stmt;
	sqlite3 *dbHandle;

	unsigned long i;
	i = m_ReadingsGlobalId;
	Logger::getLogger()->debug("storeGlobalId m_globalId %lu ", i);

	ConnectionManager *manager = ConnectionManager::getInstance();
	Connection *connection = manager->allocate();
#if TRACK_CONNECTION_USER
	string usage = "Store Global ID";
	connection->setUsage(usage);
#endif
	dbHandle = connection->getDbHandle();

	sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET global_id=" + to_string(m_ReadingsGlobalId);

	if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK)
	{
		raiseError("storeGlobalId", sqlite3_errmsg(dbHandle));
		manager->release(connection);
		return false;
	}

	manager->release(connection);
	return true;
}

/**
 * Calculates the value from the readings tables executing a max(id) on each table.
 *
 * @param dbHandle	Database connection to use for the operations
 * @return		The next global id to use (max over all tables + 1)
 */
long ReadingsCatalogue::calculateGlobalId (sqlite3 *dbHandle)
{
	string sql_cmd;
	string dbReadingsName;
	string dbName;

	int rc;
	unsigned long id;
	int nCols;
	sqlite3_stmt *stmt;

	id = 1;

	// Prepare the sql command to calculate the global id from the rows in the DB
	sql_cmd = R"( SELECT max(id) id FROM ( )";

	bool firstRow = true;
	if (m_AssetReadingCatalogue.empty())
	{
		// No assets catalogued yet: use the first readings table only
		string dbReadingsName = generateReadingsName(1, 1);

		sql_cmd += " SELECT max(id) id FROM " READINGS_DB "."
+ dbReadingsName + " ";
	}
	else
	{
		// UNION of max(id) over every per-asset readings table
		for (auto &item : m_AssetReadingCatalogue)
		{
			if (item.second.getTable() != 0)
			{
				if (!firstRow)
				{
					sql_cmd += " UNION ";
				}

				dbName = generateDbName(item.second.getDatabase());
				dbReadingsName = generateReadingsName(item.second.getDatabase(), item.second.getTable());

				sql_cmd += " SELECT max(id) id FROM " + dbName + "." + dbReadingsName + " ";
				firstRow = false;
			}
		}
		// Now add overflow tables
		for (int i = 1; i <= m_maxOverflowUsed; i++)
		{
			if (!firstRow)
			{
				sql_cmd += " UNION ";
			}
			dbName = generateDbName(i);
			dbReadingsName = generateReadingsName(i, 0);
			sql_cmd += " SELECT max(id) id FROM " + dbName + "." + dbReadingsName + " ";
			firstRow = false;
		}
	}
	sql_cmd += ") AS tb";

	if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK)
	{
		raiseError("calculateGlobalId", sqlite3_errmsg(dbHandle));
		return false;
	}

	if (SQLStep(stmt) != SQLITE_ROW)
	{
		raiseError("calculateGlobalId SQLStep", sqlite3_errmsg(dbHandle));
		id = 1;
	}
	else
	{
		nCols = sqlite3_column_count(stmt);
		id = sqlite3_column_int64(stmt, 0);

		// m_globalId stores then next value to be used
		id++;
	}

	Logger::getLogger()->debug("calculateGlobalId - global id evaluated %d", id);
	sqlite3_finalize(stmt);

	return (id);
}

/**
 * Calculates the minimum id from the readings tables executing a min(id) on each table
 *
 * @param dbHandle	Database connection to use for the operations
 * @return		The minimum reading id, 0 when no row is returned
 */
long ReadingsCatalogue::getMinGlobalId (sqlite3 *dbHandle)
{
	string sql_cmd;
	string dbReadingsName;
	string dbName;

	int rc;
	unsigned long id;
	int nCols;
	sqlite3_stmt *stmt;

	id = 1;

	// Prepare the sql command to calculate the global id from the rows in the DB
	{
		sql_cmd = R"( SELECT min(id) id FROM ( )";

		bool firstRow = true;
		if (m_AssetReadingCatalogue.empty())
		{
			// No assets catalogued yet: use the first readings table only
			string dbReadingsName = generateReadingsName(1, 1);

			sql_cmd += " SELECT min(id) id FROM " READINGS_DB "." + dbReadingsName + " ";
		}
		else
		{
			// UNION of min(id) over every per-asset readings table
			for (auto &item : m_AssetReadingCatalogue)
			{
				if (item.second.getTable() != 0)
				{
					if (!firstRow)
					{
						sql_cmd += " UNION ";
					}

					dbName = generateDbName(item.second.getDatabase());
					dbReadingsName = generateReadingsName(item.second.getDatabase(), item.second.getTable());

					sql_cmd += " SELECT min(id) id FROM " + dbName + "." + dbReadingsName + " ";
					firstRow = false;
				}
			}
			// Now add overflow tables
			for (int i = 1; i <= m_maxOverflowUsed; i++)
			{
				if (!firstRow)
				{
					sql_cmd += " UNION ";
				}
				dbName = generateDbName(i);
				dbReadingsName = generateReadingsName(i, 0);
				sql_cmd += " SELECT min(id) id FROM " + dbName + "." + dbReadingsName + " ";
				firstRow = false;
			}
		}
		sql_cmd += ") AS tb";
	}

	if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK)
	{
		raiseError(__FUNCTION__, sqlite3_errmsg(dbHandle));
		return false;
	}

	if (SQLStep(stmt) != SQLITE_ROW)
	{
		id = 0;
	}
	else
	{
		nCols = sqlite3_column_count(stmt);
		id = sqlite3_column_int64(stmt, 0);
	}
	Logger::getLogger()->debug("%s - global id evaluated %lu", __FUNCTION__, id);
	sqlite3_finalize(stmt);

	return (id);
}

/**
 * Loads the reading catalogue stored in SQLite into an in memory structure
 *
 * @return	True on success, false on a database error
 */
bool ReadingsCatalogue::loadAssetReadingCatalogue()
{
	int nCols;
	int tableId, dbId, maxDbID;
	char *asset_name;
	sqlite3_stmt *stmt;
	int rc;
	sqlite3 *dbHandle;

	ostringstream threadId;
	threadId << std::this_thread::get_id();

	ConnectionManager *manager = ConnectionManager::getInstance();
	Connection *connection = manager->allocate();
#if TRACK_CONNECTION_USER
	string usage = "Load Asset Reading Catalogue";
	connection->setUsage(usage);
#endif
	dbHandle = connection->getDbHandle();

	// loads readings catalog from the db
	const char *sql_cmd = R"(
		SELECT table_id, db_id, asset_code FROM  )" READINGS_DB R"(.asset_reading_catalogue ORDER BY table_id;
	)";

	maxDbID = 1;
	if (sqlite3_prepare_v2(dbHandle,sql_cmd,-1, &stmt,NULL) != SQLITE_OK)
	{
		raiseError("retrieve asset_reading_catalogue", sqlite3_errmsg(dbHandle));
manager->release(connection); return false; } else { // Iterate over all the rows in the resultSet while ((rc = SQLStep(stmt)) == SQLITE_ROW) { nCols = sqlite3_column_count(stmt); tableId = sqlite3_column_int(stmt, 0); dbId = sqlite3_column_int(stmt, 1); asset_name = (char *)sqlite3_column_text(stmt, 2); if (dbId > maxDbID) maxDbID = dbId; Logger::getLogger()->debug("loadAssetReadingCatalogue - thread '%s' reading Id %d dbId %d asset name '%s' max db Id %d", threadId.str().c_str(), tableId, dbId, asset_name, maxDbID); auto newMapValue = make_pair(asset_name,TableReference(dbId, tableId)); m_AssetReadingCatalogue.insert(newMapValue); if (tableId == 0 && dbId > m_maxOverflowUsed) // Overflow { m_maxOverflowUsed = dbId; } } sqlite3_finalize(stmt); } manager->release(connection); m_dbIdCurrent = maxDbID; Logger::getLogger()->debug("loadAssetReadingCatalogue maxdb %d", m_dbIdCurrent); return true; } /** * Add the newly create db to the list * */ void ReadingsCatalogue::setUsedDbId(int dbId) { m_dbIdList.push_back(dbId); } /** * Preallocate all the required databases: * * - Initial stage - creates the databases requested by the preallocation * - Following runs - attaches all the databases already created * */ void ReadingsCatalogue::prepareAllDbs() { int dbId, dbIdStart, dbIdEnd; Logger::getLogger()->debug("prepareAllDbs - dbIdCurrent %d dbIdLast %d nDbPreallocate %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate); if (m_dbIdLast == 0) { Logger::getLogger()->debug("prepareAllDbs - initial stage "); // Initial stage - creates the databases requested by the preallocation dbIdStart = 2; dbIdEnd = dbIdStart + m_storageConfigCurrent.nDbPreallocate - 2; int created = preallocateNewDbsRange(dbIdStart, dbIdEnd); if (created) { m_dbIdLast = dbIdStart + created - 1; } } else { Logger::getLogger()->debug("prepareAllDbs - following runs"); // Following runs - attaches all the databases for (dbId = 2; dbId <= m_dbIdLast ; dbId++ ) { m_dbIdList.push_back(dbId); } 
attachDbsToAllConnections();
	}
	m_dbNAvailable = (m_dbIdLast - m_dbIdCurrent) - m_storageConfigCurrent.nDbLeftFreeBeforeAllocate;

	Logger::getLogger()->debug("prepareAllDbs - dbNAvailable %d", m_dbNAvailable);
}

/**
 * Create a set of databases
 *
 * @param dbIdStart	Range of the database to create
 * @param dbIdEnd	Range of the database to create
 * @return int		The number of databases created
 *
 */
int ReadingsCatalogue::preallocateNewDbsRange(int dbIdStart, int dbIdEnd)
{
	int dbId;
	int startReadingsId;
	tyReadingsAvailable readingsAvailable;
	int created = 0;

	Logger::getLogger()->debug("preallocateNewDbsRange - Id start %d Id end %d ", dbIdStart, dbIdEnd);

	for (dbId = dbIdStart; dbId <= dbIdEnd; dbId++)
	{
		readingsAvailable = evaluateLastReadingAvailable(NULL, dbId - 1);
		startReadingsId = 1;

		// Stop at the first failure; 'created' counts the successes
		if (!createNewDB(NULL, dbId, startReadingsId, NEW_DB_ATTACH_ALL))
		{
			Logger::getLogger()->error("Failed to preallocated all databases, terminated after creating %d databases", created);
			break;
		}
		else
		{
			created++;
		}

		Logger::getLogger()->debug("preallocateNewDbsRange - db created %d startReadingsIdOnDB %d", dbId, startReadingsId);
	}
	return created;
}

/**
 * Generates a list of all the used databases. Note this list does not include
 * the first database, readings_1, only the others that have been added.
 *
 * @param dbIdList	returned by reference, the list databases in use
 *
 */
void ReadingsCatalogue::getAllDbs(vector<int> &dbIdList)
{
	int dbId;

	Logger::getLogger()->debug("getAllDbs - used db");

	// Databases referenced by the asset catalogue (readings_1 excluded)
	for (auto &item : m_AssetReadingCatalogue)
	{
		dbId = item.second.getDatabase();
		if (dbId > 1)
		{
			if (std::find(dbIdList.begin(), dbIdList.end(), dbId) == dbIdList.end() )
			{
				dbIdList.push_back(dbId);
				Logger::getLogger()->debug("getAllDbs DB %d", dbId);
			}
		}
	}

	Logger::getLogger()->debug("getAllDbs - created db");

	// Databases created but not yet referenced by any asset
	for (auto &dbId : m_dbIdList)
	{
		if (std::find(dbIdList.begin(), dbIdList.end(), dbId) == dbIdList.end() )
		{
			dbIdList.push_back(dbId);
			Logger::getLogger()->debug("getAllDbs DB created %d", dbId);
		}
	}

	sort(dbIdList.begin(), dbIdList.end());
}

/**
 * Retrieve the list of newly created databases
 *
 * @param dbIdList	returned by reference, the list of new databases
 *
 */
void ReadingsCatalogue::getNewDbs(vector<int> &dbIdList)
{
	int dbId;

	for (auto &dbId : m_dbIdList)
	{
		if (std::find(dbIdList.begin(), dbIdList.end(), dbId) == dbIdList.end() )
		{
			dbIdList.push_back(dbId);
			Logger::getLogger()->debug("getNewDbs - dbId %d", dbId);
		}
	}

	sort(dbIdList.begin(), dbIdList.end());
}

/**
 * Enable WAL mode on the given database file. This method will open and then
 * close the database and does not use any existing connection.
* * @param dbPathReadings Database path for which the WAL must be enabled * */ bool ReadingsCatalogue::enableWAL(string &dbPathReadings) { int rc; sqlite3 *dbHandle; Logger::getLogger()->debug("enableWAL on '%s'", dbPathReadings.c_str()); rc = sqlite3_open(dbPathReadings.c_str(), &dbHandle); if(rc != SQLITE_OK) { raiseError("enableWAL", sqlite3_errmsg(dbHandle)); return false; } else { // Enables the WAL feature ConnectionManager *manager = ConnectionManager::getInstance(); rc = sqlite3_exec(dbHandle, manager->getDBConfiguration().c_str(), NULL, NULL, NULL); if (rc != SQLITE_OK) { raiseError("enableWAL", sqlite3_errmsg(dbHandle)); return false; } } sqlite3_close(dbHandle); return true; } /** * Attach a database to the database connection passed to the call * * @param dbHandle Database connection to use for the operations * @param path path of the database to attach * @param alias alias to be assigned to the attached database * @param id the database ID */ bool ReadingsCatalogue::attachDb(sqlite3 *dbHandle, std::string &path, std::string &alias, int id) { int rc; string sqlCmd; bool result = true; char *zErrMsg = NULL; sqlCmd = "ATTACH DATABASE '" + path + "' AS " + alias + ";"; Logger::getLogger()->debug("attachDb - path '%s' alias '%s' cmd '%s'" , path.c_str(), alias.c_str() , sqlCmd.c_str() ); rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg); if (rc != SQLITE_OK) { Logger::getLogger()->error("Failed to attach the db '%s' to the connection %X, error '%s'", path.c_str(), dbHandle, zErrMsg); sqlite3_free(zErrMsg); result = false; } // See if the overflow table exists and if not create it // This is a workaround as the schema update mechanism can't cope // with multiple readings tables createReadingsOverflowTable(dbHandle, id); return result; } /** * Detach a database from a connection * * @param dbHandle Database connection to use for the operations* * @param alias alias of the database to detach */ void ReadingsCatalogue::detachDb(sqlite3 *dbHandle, std::string 
&alias)
{
	int rc;
	std::string sqlCmd;
	char *zErrMsg = nullptr;

	sqlCmd = "DETACH DATABASE " + alias + ";";

	Logger::getLogger()->debug("%s - db '%s' cmd '%s'" ,__FUNCTION__, alias.c_str() , sqlCmd.c_str() );

	rc = SQLExec (dbHandle, sqlCmd.c_str(), &zErrMsg);
	if (rc != SQLITE_OK)
	{
		// Failure is logged but not propagated; the caller cannot recover
		Logger::getLogger()->error("%s - It was not possible to detach the db '%s' from the connection %X, error '%s'", __FUNCTION__, alias.c_str(), dbHandle, zErrMsg);
		sqlite3_free(zErrMsg);
	}
}

/**
 * Attaches all the defined SQLite database to all the connections and enable the WAL
 *
 * NOTE: the list is consumed - entries are popped as they are attached
 *
 * @param dbHandle	Database connection to use for the operations
 * @param dbIdList	List of database to attach
 * @return		True on success, false on any error
 *
 */
bool ReadingsCatalogue::connectionAttachDbList(sqlite3 *dbHandle, vector<int> &dbIdList)
{
	int dbId;
	string dbPathReadings;
	string dbAlias;
	int item;
	bool result;

	result = true;

	Logger::getLogger()->debug("connectionAttachDbList - start dbHandle %X" ,dbHandle);

	while (result && !dbIdList.empty())
	{
		item = dbIdList.back();

		dbPathReadings = generateDbFilePath(item);
		dbAlias = generateDbAlias(item);

		Logger::getLogger()->debug( "connectionAttachDbList - dbHandle %X dbId %d path %s alias %s", dbHandle, item, dbPathReadings.c_str(), dbAlias.c_str());

		result = attachDb(dbHandle, dbPathReadings, dbAlias, item);
		dbIdList.pop_back();
	}

	Logger::getLogger()->debug("connectionAttachDbList - end dbHandle %X" ,dbHandle);

	return result;
}

/**
 * Attaches all the defined SQLite database to all the connections and enable the WAL
 *
 * @param dbHandle	Database connection to use for the operations
 * @return		True on success, false on any error
 *
 */
bool ReadingsCatalogue::connectionAttachAllDbs(sqlite3 *dbHandle)
{
	int dbId;
	string dbPathReadings;
	string dbAlias;
	vector<int> dbIdList;
	bool result;

	result = true;

	getAllDbs(dbIdList);

	for(int item : dbIdList)
	{
		dbPathReadings = generateDbFilePath(item);
		dbAlias = generateDbAlias(item);

		result = attachDb(dbHandle, dbPathReadings, dbAlias, item);
		if (! result)
		{
			// Stop at the first failing attach
			Logger::getLogger()->error("Unable to attach all databases to the connection");
			break;
		}

		Logger::getLogger()->debug("connectionAttachAllDbs - dbId %d path %s alias %s", item, dbPathReadings.c_str(), dbAlias.c_str());
	}

	return result;
}

/**
 * Attaches all the defined SQLite database to all the connections and enable the WAL
 *
 * @return	True on success, false on any error
 *
 */
bool ReadingsCatalogue::attachDbsToAllConnections()
{
	int dbId;
	string dbPathReadings;
	string dbAlias;
	vector<int> dbIdList;
	bool result;

	result = true;

	ConnectionManager *manager = ConnectionManager::getInstance();
	Connection *connection = manager->allocate();
#if TRACK_CONNECTION_USER
	string usage = "Attach DBs to all connections";
	connection->setUsage(usage);
#endif

	getAllDbs(dbIdList);

	for (int item : dbIdList)
	{
		dbPathReadings = generateDbFilePath(item);
		dbAlias = generateDbAlias(item);

		enableWAL(dbPathReadings);

		// Attached the new db to the connections
		result = manager->attachNewDb(dbPathReadings, dbAlias);
		if (! result)
			break;

		Logger::getLogger()->debug("attachDbsToAllConnections - dbId %d path '%s' alias '%s'", item, dbPathReadings.c_str(), dbAlias.c_str());
	}

	manager->release(connection);

	return (result);
}

/**
 * Setup the multiple readings databases/tables feature
 *
 * @param storageConfig	Configuration to apply
 *
 */
void ReadingsCatalogue::multipleReadingsInit(STORAGE_CONFIGURATION &storageConfig)
{
	sqlite3 *dbHandle;

	ConnectionManager *manager = ConnectionManager::getInstance();
	Connection *connection = manager->allocate();
#if TRACK_CONNECTION_USER
	string usage = "Multiple readings init";
	connection->setUsage(usage);
#endif

	if (!
connection->supportsReadings())
	{
		// Nothing to do: this plugin instance has no readings storage
		manager->release(connection);
		return;
	}
	dbHandle = connection->getDbHandle();

	// Enquire for the attached database limit
	m_attachLimit = sqlite3_limit(dbHandle, SQLITE_LIMIT_ATTACHED, -1);
	Logger::getLogger()->info("The version of SQLite can support %d attached databases", m_attachLimit);
	m_compounds = sqlite3_limit(dbHandle, SQLITE_LIMIT_COMPOUND_SELECT, -1);

	// Sanitise configuration values: both must be at least 1
	if (storageConfig.nDbLeftFreeBeforeAllocate < 1)
	{
		Logger::getLogger()->warn("%s - parameter nDbLeftFreeBeforeAllocate not valid, use a value >= 1, 1 used ", __FUNCTION__);
		storageConfig.nDbLeftFreeBeforeAllocate = 1;
	}
	if (storageConfig.nDbToAllocate < 1)
	{
		Logger::getLogger()->warn("%s - parameter nDbToAllocate not valid, use a value >= 1, 1 used ", __FUNCTION__);
		storageConfig.nDbToAllocate = 1;
	}

	// Keep the API-requested values separate from the currently applied ones
	m_storageConfigApi.nReadingsPerDb = storageConfig.nReadingsPerDb;
	m_storageConfigApi.nDbPreallocate = storageConfig.nDbPreallocate;
	m_storageConfigApi.nDbLeftFreeBeforeAllocate = storageConfig.nDbLeftFreeBeforeAllocate;
	m_storageConfigApi.nDbToAllocate = storageConfig.nDbToAllocate;

	m_storageConfigCurrent.nDbLeftFreeBeforeAllocate = storageConfig.nDbLeftFreeBeforeAllocate;
	m_storageConfigCurrent.nDbToAllocate = storageConfig.nDbToAllocate;

	try
	{
		configurationRetrieve(dbHandle);
		loadAssetReadingCatalogue();
		preallocateReadingsTables(1);     // on the first database

		Logger::getLogger()->debug("nReadingsPerDb %d", m_storageConfigCurrent.nReadingsPerDb);
		Logger::getLogger()->debug("nDbPreallocate %d", m_storageConfigCurrent.nDbPreallocate);

		prepareAllDbs();

		applyStorageConfigChanges(dbHandle);
		Logger::getLogger()->debug("multipleReadingsInit - dbIdCurrent %d dbIdLast %d nDbPreallocate current %d requested %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, m_storageConfigApi.nDbPreallocate);

		storeReadingsConfiguration(dbHandle);

		preallocateReadingsTables(0);     // on the last database

		evaluateGlobalId();
	}
	catch (exception& e)
	{
		Logger::getLogger()->error("It is not possible to initialize the multiple readings handling, error '%s' ", e.what());
	}

	manager->release(connection);
}

/**
 * Store on the database the configuration of the storage plugin
 *
 * @param dbHandle	Database connection to use for the operations
 * @throws runtime_error when the configuration cannot be stored
 *
 */
void ReadingsCatalogue::storeReadingsConfiguration (sqlite3 *dbHandle)
{
	string errMsg;
	string sql_cmd;

	Logger::getLogger()->debug("storeReadingsConfiguration - nReadingsPerDb %d nDbPreallocate %d", m_storageConfigCurrent.nReadingsPerDb , m_storageConfigCurrent.nDbPreallocate);

	sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET n_readings_per_db=" + to_string(m_storageConfigCurrent.nReadingsPerDb) + "," +
		"n_db_preallocate=" + to_string(m_storageConfigCurrent.nDbPreallocate) + "," +
		"db_id_Last=" + to_string(m_dbIdLast) + ";";

	Logger::getLogger()->debug("sql_cmd '%s'", sql_cmd.c_str());

	if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK)
	{
		errMsg = "is not possible to store the configuration about the multiple readings handling, error :";
		errMsg += sqlite3_errmsg(dbHandle);
		raiseError("storeReadingsConfiguration", errMsg.c_str());
		throw runtime_error(errMsg.c_str());
	}
}

/**
 * Add all the required DBs in relation to the storage plugin configuration
 *
 * @param dbHandle	Database connection to use for the operations
 *
 */
void ReadingsCatalogue::configChangeAddDb(sqlite3 *dbHandle)
{
	string errMsg;
	int dbId;
	int startReadingsId;
	int startId, endId;
	tyReadingsAvailable readingsAvailable;

	startId = m_dbIdLast +1;
	endId = m_storageConfigApi.nDbPreallocate;

	Logger::getLogger()->debug("configChangeAddDb - dbIdCurrent %d dbIdLast %d nDbPreallocate current %d requested %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, m_storageConfigApi.nDbPreallocate);
	Logger::getLogger()->debug("configChangeAddDb - Id start %d Id end %d ", startId, endId);

	int created = 0;
	try
	{
		for (dbId = startId; dbId <= endId; dbId++)
		{
			readingsAvailable = evaluateLastReadingAvailable(dbHandle, dbId - 1);
			if
(readingsAvailable.lastReadings == 0) { errMsg = "Unable to determinate used readings table while adding a database"; throw runtime_error(errMsg.c_str()); } startReadingsId = readingsAvailable.lastReadings +1; if (! createNewDB(dbHandle, dbId, startReadingsId, NEW_DB_ATTACH_ALL)) { errMsg = "Unable to add a new database"; throw runtime_error(errMsg.c_str()); } else { created++; } Logger::getLogger()->debug("configChangeAddDb - db created %d startReadingsIdOnDB %d", dbId, startReadingsId); } } catch (exception& e) { Logger::getLogger()->error("It is not possible to add the requested databases, error '%s' - removing created databases", e.what()); dbsRemove(startId , endId); } m_dbIdLast = startId + created - 1; m_storageConfigCurrent.nDbPreallocate = m_storageConfigApi.nDbPreallocate; m_dbNAvailable = (m_dbIdLast - m_dbIdCurrent) - m_storageConfigCurrent.nDbLeftFreeBeforeAllocate; } /** * Removes all the required DBs in relation to the storage plugin configuration * * @param dbHandle Database connection to use for the operations * */ void ReadingsCatalogue::configChangeRemoveDb(sqlite3 *dbHandle) { string errMsg; int dbId; int startReadingsId; tyReadingsAvailable readingsAvailable; string dbAlias; string dbPath; ConnectionManager *manager = ConnectionManager::getInstance(); Logger::getLogger()->debug("configChangeRemoveDb - dbIdCurrent %d dbIdLast %d nDbPreallocate current %d requested %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, m_storageConfigApi.nDbPreallocate); Logger::getLogger()->debug("configChangeRemoveDb - Id start %d Id end %d ", m_dbIdCurrent, m_storageConfigApi.nDbPreallocate); dbsRemove(m_storageConfigApi.nDbPreallocate + 1, m_dbIdLast); m_dbIdLast = m_storageConfigApi.nDbPreallocate; m_storageConfigCurrent.nDbPreallocate = m_storageConfigApi.nDbPreallocate; m_dbNAvailable = (m_dbIdLast - m_dbIdCurrent) - m_storageConfigCurrent.nDbLeftFreeBeforeAllocate; } /** * Adds all the required readings tables in relation to the storage 
plugin configuration
 *
 * @param dbHandle	- handle of the connection to use for the database operation
 * @param startId	- range of the readings table to create
 * @param endId		- range of the readings table to create
 *
 */
void ReadingsCatalogue::configChangeAddTables(sqlite3 *dbHandle, int startId, int endId)
{
	int dbId;
	int maxReadingUsed;
	int nTables;

	nTables = endId - startId +1;

	Logger::getLogger()->debug("%s - startId %d endId %d nTables %d", __FUNCTION__, startId, endId, nTables);

	// The same table id range is created in every database, first to last
	for (dbId = 1; dbId <= m_dbIdLast ; dbId++ )
	{
		Logger::getLogger()->debug("%s - configChangeAddTables - dbId %d startId %d nTables %d", __FUNCTION__, dbId, startId, nTables);
		createReadingsTables(dbHandle, dbId, startId, nTables);
	}
	m_storageConfigCurrent.nReadingsPerDb = m_storageConfigApi.nReadingsPerDb;
	maxReadingUsed = calcMaxReadingUsed();
	m_nReadingsAvailable = m_storageConfigCurrent.nReadingsPerDb - maxReadingUsed;
	Logger::getLogger()->debug("%s - maxReadingUsed %d nReadingsPerDb %d m_nReadingsAvailable %d", __FUNCTION__, maxReadingUsed, m_storageConfigCurrent.nReadingsPerDb, m_nReadingsAvailable);
}

/**
 * Deletes all the required readings tables in relation to the storage plugin configuration
 *
 * @param dbHandle	- handle of the connection to use for the database operation
 * @param startId	- range of the readings table to delete
 * @param endId		- range of the readings table to delete
 *
 */
void ReadingsCatalogue::configChangeRemoveTables(sqlite3 *dbHandle, int startId, int endId)
{
	int dbId;
	int maxReadingUsed;

	Logger::getLogger()->debug("%s - startId %d endId %d", __FUNCTION__, startId, endId);

	// The same table id range is dropped in every database, first to last
	for (dbId = 1; dbId <= m_dbIdLast ; dbId++ )
	{
		Logger::getLogger()->debug("%s - configChangeRemoveTables - dbId %d startId %d endId %d", __FUNCTION__, dbId, startId, endId);
		dropReadingsTables(dbHandle, dbId, startId, endId);
	}
	m_storageConfigCurrent.nReadingsPerDb = m_storageConfigApi.nReadingsPerDb;
	maxReadingUsed = calcMaxReadingUsed();
	m_nReadingsAvailable = m_storageConfigCurrent.nReadingsPerDb - maxReadingUsed;
	Logger::getLogger()->debug("%s - maxReadingUsed %d nReadingsPerDb %d m_nReadingsAvailable %d", __FUNCTION__, maxReadingUsed, m_storageConfigCurrent.nReadingsPerDb, m_nReadingsAvailable);
}

/**
 * Drops a set of readings tables and their indexes
 *
 * @param dbHandle	- handle of the connection to use for the database operation
 * @param dbId		- database id on which the tables should be dropped
 * @param idStart	- range of the readings table to delete
 * @param idEnd		- range of the readings table to delete
 * @throws runtime_error if any DROP statement fails
 */
void ReadingsCatalogue::dropReadingsTables(sqlite3 *dbHandle, int dbId, int idStart, int idEnd)
{
	string errMsg;
	string dropReadings, dropIdx;
	string dbName;
	string tableName;
	int tableId;
	int rc;
	int idx;
	bool newConnection;

	Logger::getLogger()->debug("%s - dropping tales on database id %dform id %d to %d", __FUNCTION__, dbId, idStart, idEnd);

	dbName = generateDbName(dbId);

	for (idx = idStart ; idx <= idEnd; ++idx)
	{
		tableName = generateReadingsName(dbId, idx);

		// NOTE(review): the DROP INDEX statement is not schema-qualified with
		// dbName, unlike DROP TABLE and the matching CREATE INDEX - confirm
		// SQLite resolves the unqualified index name against the intended
		// attached database
		dropReadings = "DROP TABLE " + dbName + "." + tableName + ";";
		dropIdx = "DROP INDEX " + tableName + "_ix3;";

		// Drop the index first, then the table
		rc = SQLExec(dbHandle, dropIdx.c_str());
		if (rc != SQLITE_OK)
		{
			errMsg = sqlite3_errmsg(dbHandle);
			raiseError(__FUNCTION__, sqlite3_errmsg(dbHandle));
			throw runtime_error(errMsg.c_str());
		}

		rc = SQLExec(dbHandle, dropReadings.c_str());
		if (rc != SQLITE_OK)
		{
			errMsg = sqlite3_errmsg(dbHandle);
			raiseError(__FUNCTION__, sqlite3_errmsg(dbHandle));
			throw runtime_error(errMsg.c_str());
		}
	}
}

/**
 * Deletes a range of database, detach and delete the file
 *
 * @param startId	- range of the databases to delete
 * @param endId		- range of the databases to delete
 *
 */
void ReadingsCatalogue::dbsRemove(int startId, int endId)
{
	string errMsg;
	int dbId;
	int startReadingsId;
	tyReadingsAvailable readingsAvailable;
	string dbAlias;
	string dbPath;

	ConnectionManager *manager = ConnectionManager::getInstance();

	Logger::getLogger()->debug("dbsRemove - startId %d endId %d", startId, endId);

	for (dbId = startId; dbId <= endId; dbId++)
	{
		dbAlias = generateDbAlias(dbId);
		dbPath = generateDbFilePath(dbId);
		Logger::getLogger()->debug("dbsRemove - db alias '%s' db path '%s'", dbAlias.c_str(), dbPath.c_str());
		// Detach from all the connections before deleting the file
		manager->detachNewDb(dbAlias);
		dbFileDelete(dbPath);
	}
}

/**
 * Delete a file
 *
 * @param dbPath	- Full path of the file to delete
 * @throws runtime_error if the file cannot be removed
 */
void ReadingsCatalogue::dbFileDelete(string dbPath)
{
	string errMsg;
	bool success;

	Logger::getLogger()->debug("dbFileDelete - db path '%s'", dbPath.c_str());

	if (remove (dbPath.c_str()) !=0)
	{
		errMsg = "Unable to remove database :" + dbPath + ":";
		throw runtime_error(errMsg.c_str());
	}
}

/**
 * Evaluates and applies the storage plugin configuration
 *
 * Compares the requested configuration (m_storageConfigApi) against the
 * currently applied one (m_storageConfigCurrent) and adds/removes databases
 * and readings tables accordingly. Requests that would discard data already
 * stored are rejected with a warning.
 *
 * @param dbHandle	handle of the connection to use for the database operations
 * @return		true if any change was applied, false otherwise
 */
bool ReadingsCatalogue::applyStorageConfigChanges(sqlite3 *dbHandle)
{
	bool configChanged;
	ACTION operation;
	int maxReadingUsed;

	configChanged = false;

	Logger::getLogger()->debug("applyStorageConfigChanges - dbIdCurrent %d dbIdLast %d nDbPreallocate current %d requested %d nDbLeftFreeBeforeAllocate %d", m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, m_storageConfigApi.nDbPreallocate, m_storageConfigCurrent.nDbLeftFreeBeforeAllocate);

	try{
		// First handle the number of preallocated databases
		if (m_storageConfigApi.nDbPreallocate <= 2)
		{
			Logger::getLogger()->warn("applyStorageConfigChanges: parameter nDbPreallocate changed, but it is not possible to apply the change, use a larger value >= 3");
		}
		else
		{
			operation = changesLogicDBs(m_dbIdCurrent, m_dbIdLast, m_storageConfigCurrent.nDbPreallocate, m_storageConfigApi.nDbPreallocate, m_storageConfigCurrent.nDbLeftFreeBeforeAllocate);

			// Database operation
			{
				if (operation == ACTION_DB_ADD)
				{
					Logger::getLogger()->debug("applyStorageConfigChanges - parameters nDbPreallocate changed, adding more databases from %d to %d", m_dbIdLast, m_storageConfigApi.nDbPreallocate);
					configChanged = true;
					configChangeAddDb(dbHandle);
				}
				else if (operation == ACTION_INVALID)
				{
					Logger::getLogger()->warn("applyStorageConfigChanges: parameter nDbPreallocate changed, but it is not possible to apply the change as there are already data stored in the database id %d, use a larger value", m_dbIdCurrent);
				}
				else if (operation == ACTION_DB_REMOVE)
				{
					Logger::getLogger()->debug("applyStorageConfigChanges - parameters nDbPreallocate changed, removing databases from %d to %d", m_storageConfigApi.nDbPreallocate, m_dbIdLast);
					configChanged = true;
					configChangeRemoveDb(dbHandle);
				}
				else
				{
					Logger::getLogger()->debug("applyStorageConfigChanges - not changes");
				}
			}
		}

		// Then handle the number of readings tables per database
		if (m_storageConfigApi.nReadingsPerDb <= 2)
		{
			Logger::getLogger()->warn("applyStorageConfigChanges: parameter nReadingsPerDb changed, but it is not possible to apply the change, use a larger value >= 3");
		}
		else
		{
			maxReadingUsed = calcMaxReadingUsed();
			operation = changesLogicTables(maxReadingUsed, m_storageConfigCurrent.nReadingsPerDb, m_storageConfigApi.nReadingsPerDb);
			Logger::getLogger()->debug("%s - maxReadingUsed %d Current %d Requested %d", __FUNCTION__, maxReadingUsed, m_storageConfigCurrent.nReadingsPerDb, m_storageConfigApi.nReadingsPerDb);

			// Table operation
			{
				if (operation == ACTION_TB_ADD)
				{
					int startId, endId;
					startId = m_storageConfigCurrent.nReadingsPerDb +1;
					endId = m_storageConfigApi.nReadingsPerDb;
					Logger::getLogger()->debug("applyStorageConfigChanges - parameters nReadingsPerDb changed, adding more tables from %d to %d", startId, endId);
					configChanged = true;
					configChangeAddTables(dbHandle, startId, endId);
				}
				else if (operation == ACTION_INVALID)
				{
					Logger::getLogger()->warn("applyStorageConfigChanges: parameter nReadingsPerDb changed, but it is not possible to apply the change as there are already data stored in the table id %d, use a larger value", maxReadingUsed);
				}
				else if (operation == ACTION_TB_REMOVE)
				{
					int startId, endId;
					startId = m_storageConfigApi.nReadingsPerDb +1;
					endId = m_storageConfigCurrent.nReadingsPerDb;
					Logger::getLogger()->debug("applyStorageConfigChanges - parameters nReadingsPerDb changed, removing tables from %d to %d", m_storageConfigApi.nReadingsPerDb +1, m_storageConfigCurrent.nReadingsPerDb);
					configChanged = true;
					configChangeRemoveTables(dbHandle, startId, endId);
				}
				else
				{
					Logger::getLogger()->debug("applyStorageConfigChanges - not changes");
				}
			}
		}
		if ( !configChanged)
			Logger::getLogger()->debug("applyStorageConfigChanges - storage parameters not changed");
	}
	catch (exception& e)
	{
		Logger::getLogger()->error("It is not possible to apply the chnages to the multi readings handling, error '%s' ", e.what());
	}
	return configChanged;
}

/**
 * Calculates the maximum readings id used
 *
 * Scans the in-memory asset/table catalogue for the highest table id in use.
 *
 * @return	maximum readings id used
 *
 */
int ReadingsCatalogue::calcMaxReadingUsed()
{
	int maxReading = 0;
	for (auto &item : m_AssetReadingCatalogue)
	{
		if (item.second.getTable() > maxReading)
			maxReading = item.second.getTable();
	}
	return (maxReading);
}

/**
 * Evaluates the operations to be executed in relation to the input parameters on the readings tables
 *
 * @param maxUsed	Maximum table id used
 * @param Current
Current table id configured * @param Request Requested configuration id * @return Operation to execute : ACTION_TB_NONE / ACTION_TB_ADD /ACTION_TB_REMOVE / ACTION_INVALID * */ ReadingsCatalogue::ACTION ReadingsCatalogue::changesLogicTables(int maxUsed ,int Current, int Request) { ACTION operation; Logger::getLogger()->debug("%s - maxUsed %d Request %d Request current %d", __FUNCTION__, maxUsed, Current, Request); operation = ACTION_TB_NONE; if (Current != Request) { if (Request > Current) { operation = ACTION_TB_ADD; } else if ((Request < Current) && (maxUsed >= Request)) { operation = ACTION_INVALID; } else if ((Request < Current) && (maxUsed < Request)) { operation = ACTION_TB_REMOVE; } } return operation; } /** * Evaluates the operations to be executed in relation to the input parameters on the databases * * @param dbIdCurrent - Current database id in use * @param dbIdLast - Latest database id created, but necessary in use * @param nDbPreallocateCurrent - Current table id configured * @param nDbPreallocateRequest - Requested configuration id * @param nDbLeftFreeBeforeAllocate - Number of database to maintain free * @return - Operation to execute : ACTION_DB_NONE / ACTION_DB_ADD / ACTION_DB_REMOVE / ACTION_INVALID * */ ReadingsCatalogue::ACTION ReadingsCatalogue::changesLogicDBs(int dbIdCurrent , int dbIdLast, int nDbPreallocateCurrent, int nDbPreallocateRequest, int nDbLeftFreeBeforeAllocate) { ACTION operation; operation = ACTION_DB_NONE; if ( nDbPreallocateCurrent != nDbPreallocateRequest) { if (nDbPreallocateRequest > dbIdLast) { operation = ACTION_DB_ADD; } else if (nDbPreallocateRequest < (dbIdCurrent + nDbLeftFreeBeforeAllocate) ) { operation = ACTION_INVALID; } else if ( (nDbPreallocateRequest >= (dbIdCurrent + nDbLeftFreeBeforeAllocate)) && (nDbPreallocateRequest < dbIdLast)) { operation = ACTION_DB_REMOVE; } } return operation; } /** * Creates all the required readings tables considering the tables already defined in the database * and the number of 
tables to have on each database. * * @param dbId Database Id in which the table must be created * */ void ReadingsCatalogue::preallocateReadingsTables(int dbId) { int readingsToAllocate; int readingsToCreate; int startId; if (dbId == 0 ) dbId = m_dbIdCurrent; tyReadingsAvailable readingsAvailable; string dbName; readingsAvailable.lastReadings = 0; readingsAvailable.tableCount = 0; // Identifies last readings available readingsAvailable = evaluateLastReadingAvailable(NULL, dbId); readingsToAllocate = getNReadingsAllocate(); if (readingsAvailable.tableCount < readingsToAllocate) { readingsToCreate = readingsToAllocate - readingsAvailable.tableCount; if (dbId == 1) startId = 2; else startId = 1; createReadingsTables(NULL, dbId, startId, readingsToCreate); } m_nReadingsAvailable = readingsToAllocate - getUsedTablesDbId(dbId); Logger::getLogger()->debug("preallocateReadingsTables - dbId %d nReadingsAvailable %d lastReadingsCreated %d tableCount %d", m_dbIdCurrent, m_nReadingsAvailable, readingsAvailable.lastReadings, readingsAvailable.tableCount); } /** * Generates the full path of the SQLite database from the given the id * * @param dbId Database Id for which the full path must be generated * @return Generated the full path * */ string ReadingsCatalogue::generateDbFilePath(int dbId) { string dbPathReadings; char *defaultReadingsConnection; char defaultReadingsConnectionTmp[1000]; defaultReadingsConnection = getenv("DEFAULT_SQLITE_DB_READINGS_FILE"); if (defaultReadingsConnection == NULL) { dbPathReadings = getDataDir(); } else { // dirname modify the content of the parameter strncpy ( defaultReadingsConnectionTmp, defaultReadingsConnection, sizeof(defaultReadingsConnectionTmp) ); dbPathReadings = dirname(defaultReadingsConnectionTmp); } if (dbPathReadings.back() != '/') dbPathReadings += "/"; dbPathReadings += generateDbFileName(dbId); return (dbPathReadings); } /** * Stores on the persistent storage the id of the last created database * * @param dbHandle Database 
connection to use for the operations
 * @param newDbId	Id of the created database
 * @return		True of success, false on any error
 *
 */
bool ReadingsCatalogue::latestDbUpdate(sqlite3 *dbHandle, int newDbId)
{
	string sql_cmd;

	// NOTE(review): %X expects an unsigned int; passing a pointer here is
	// technically undefined for printf-style formatting - consider %p
	Logger::getLogger()->debug("latestDbUpdate - dbHandle %X newDbId %d", dbHandle, newDbId);

	{
		sql_cmd = " UPDATE " READINGS_DB ".configuration_readings SET db_id_Last=" + to_string(newDbId) + ";";

		if (SQLExec(dbHandle, sql_cmd.c_str()) != SQLITE_OK)
		{
			raiseError("latestDbUpdate", sqlite3_errmsg(dbHandle));
			return false;
		}
	}
	return true;
}

/**
 * Creates a new database
 *
 * Creates (or reuses) the database file, enables WAL on it, records it as the
 * latest database, attaches it to the connections as requested and populates
 * it with the readings tables and the overflow table.
 *
 * @param dbHandle	Database connection to use for the operations, a new one is allocated if NULL
 * @param newDbId	Id of the database to create
 * @param startId	Starting id for the creation of the reading tables
 * @param attachAllDb	Type of attach operation to apply on the newly created database
 * @return		True on success, false on any error
 *
 */
bool ReadingsCatalogue::createNewDB(sqlite3 *dbHandle, int newDbId, int startId, NEW_DB_OPERATION attachAllDb)
{
	int rc;
	int nTables;
	int readingsToAllocate;
	int readingsToCreate;
	string sqlCmd;
	string dbPathReadings;
	string dbAlias;
	struct stat st;
	bool dbAlreadyPresent=false;
	bool result;
	bool connAllocated;
	Connection *connection;

	connAllocated = false;
	result = true;

	ConnectionManager *manager = ConnectionManager::getInstance();

	// Are there enough descriptors available to create another database
	if (!manager->allowMoreDatabases())
	{
		return false;
	}

	// Allocate a connection on demand if the caller did not supply one
	if (dbHandle == NULL)
	{
		connection = manager->allocate();
#if TRACK_CONNECTION_USER
		string usage = "Create New database";
		connection->setUsage(usage);
#endif
		dbHandle = connection->getDbHandle();
		connAllocated = true;
	}

	// Creates the DB data file
	dbPathReadings = generateDbFilePath(newDbId);

	dbAlreadyPresent = false;
	if(stat(dbPathReadings.c_str(),&st) == 0)
	{
		Logger::getLogger()->info("createNewDB - database file '%s' already present, creation skipped " , dbPathReadings.c_str() );
		dbAlreadyPresent = true;
	}
	else
	{
		Logger::getLogger()->debug("createNewDB - new database created '%s'", dbPathReadings.c_str());
	}
	enableWAL(dbPathReadings);

	latestDbUpdate(dbHandle, newDbId);

	readingsToAllocate = getNReadingsAllocate();
	readingsToCreate = readingsToAllocate;

	// Attached the new db to the connections
	dbAlias = generateDbAlias(newDbId);

	if (attachAllDb == NEW_DB_ATTACH_ALL)
	{
		Logger::getLogger()->debug("createNewDB - attach all the databases");
		result = manager->attachNewDb(dbPathReadings, dbAlias);
	}
	else if (attachAllDb == NEW_DB_ATTACH_REQUEST)
	{
		Logger::getLogger()->debug("createNewDB - attach single");
		// NOTE(review): the result of attachDb is immediately overwritten by
		// attachRequestNewDb, so a failure of the first attach is lost
		result = attachDb(dbHandle, dbPathReadings, dbAlias, newDbId);
		result = manager->attachRequestNewDb(newDbId, dbHandle);
	}
	else if (attachAllDb == NEW_DB_DETACH)
	{
		Logger::getLogger()->debug("createNewDB - attach");
		result = attachDb(dbHandle, dbPathReadings, dbAlias, newDbId);
	}

	if (result)
	{
		setUsedDbId(newDbId);

		if (dbAlreadyPresent)
		{
			// Work out how many tables the existing file still needs
			tyReadingsAvailable readingsAvailable = evaluateLastReadingAvailable(dbHandle, newDbId);
			if (readingsAvailable.lastReadings == -1)
			{
				Logger::getLogger()->error("createNewDB - database file '%s' is already present but it is not possible to evaluate the readings table already present" , dbPathReadings.c_str() );
				result = false;
			}
			else
			{
				readingsToCreate = readingsToAllocate - readingsAvailable.tableCount;
				startId = readingsAvailable.lastReadings +1;
				Logger::getLogger()->info("createNewDB - database file '%s' is already present, creating readings tables - from id %d n %d " , dbPathReadings.c_str(), startId, readingsToCreate);
			}
		}
		if (readingsToCreate > 0)
		{
			// NOTE(review): startId is unconditionally reset to 1 here, which
			// clobbers the value computed just above for an already present
			// database - confirm this is intended
			startId = 1;
			createReadingsTables(dbHandle, newDbId ,startId, readingsToCreate);

			Logger::getLogger()->info("createNewDB - database file '%s' created readings table - from id %d n %d " , dbPathReadings.c_str(), startId, readingsToCreate);
		}
		m_nReadingsAvailable = readingsToAllocate;
	}

	// Create the overflow table in the new database if it was not previosuly created
	createReadingsOverflowTable(dbHandle, newDbId);

	if (attachAllDb == NEW_DB_DETACH)
	{
		Logger::getLogger()->debug("createNewDB - deattach");
		detachDb(dbHandle, dbAlias);
	}

	if (connAllocated)
	{
		manager->release(connection);
	}

	return (result);
}

/**
 * Creates a set of reading tables in the given database id
 *
 * @param dbHandle	Database connection to use for the operations, a new one is allocated if NULL
 * @param dbId		Database id on which the tables should be created
 * @param idStartFrom	Id from with to start to create the tables
 * @param nTables	Number of table to create
 * @return		True on success, false on any error
 */
bool ReadingsCatalogue::createReadingsTables(sqlite3 *dbHandle, int dbId, int idStartFrom, int nTables)
{
	string createReadings, createReadingsIdx;
	string dbName;
	string dbReadingsName;
	int tableId;
	int rc;
	int readingsIdx;
	bool newConnection;
	Connection *connection;

	Logger *logger = Logger::getLogger();

	newConnection = false;
	ConnectionManager *manager = ConnectionManager::getInstance();
	if (dbHandle == NULL)
	{
		connection = manager->allocate();
#if TRACK_CONNECTION_USER
		string usage = "Create Readings Tables";
		connection->setUsage(usage);
#endif
		dbHandle = connection->getDbHandle();
		newConnection = true;
	}

	logger->info("Creating %d readings table in advance starting id %d", nTables, idStartFrom);

	dbName = generateDbName(dbId);

	for (readingsIdx = 0 ; readingsIdx < nTables; ++readingsIdx)
	{
		tableId = idStartFrom + readingsIdx;
		dbReadingsName = generateReadingsName(dbId, tableId);

		createReadings = R"( CREATE TABLE IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"( ( id INTEGER PRIMARY KEY AUTOINCREMENT, reading JSON NOT NULL DEFAULT '{}', user_ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')), ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')) ); )";

		createReadingsIdx = R"( CREATE INDEX IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"(_ix3 ON )" + dbReadingsName + R"( (user_ts); )";

		logger->info(" Creating table '%s' sql cmd '%s'", dbReadingsName.c_str(), createReadings.c_str());

		rc = SQLExec(dbHandle, createReadings.c_str());
		if (rc != SQLITE_OK)
		{
			raiseError("createReadingsTables", sqlite3_errmsg(dbHandle));
			// Release the on-demand connection before bailing out
			if (newConnection)
			{
				manager->release(connection);
			}
			return false;
		}

		rc = SQLExec(dbHandle, createReadingsIdx.c_str());
		if (rc != SQLITE_OK)
		{
			raiseError("createReadingsTables", sqlite3_errmsg(dbHandle));
			if (newConnection)
			{
				manager->release(connection);
			}
			return false;
		}
	}
	if (newConnection)
	{
		manager->release(connection);
	}
	return true;
}

/**
 * Create the overflow reading tables in the given database id
 *
 * We should only do this once when we upgrade to the version with an
 * overflow table. Although this should ideally be done in the schema
 * update script we can't do this as we can not loop over all the
 * databases in that script.
 *
 * @param dbHandle	Database connection to use for the operation
 * @param dbId		Database id on which the overflow table should be created
 * @return		True on success, false on any error
 */
bool ReadingsCatalogue::createReadingsOverflowTable(sqlite3 *dbHandle, int dbId)
{
	string dbReadingsName;

	Logger *logger = Logger::getLogger();
	ConnectionManager *manager = ConnectionManager::getInstance();

	string dbName = generateDbName(dbId);

	logger->info("Creating reading overflow table for database '%s'", dbName.c_str());

	dbReadingsName = string(READINGS_TABLE) + "_" + to_string(dbId);
	dbReadingsName.append("_overflow");

	// Probe for the table: a successful SELECT means it already exists
	string sqlCmd = "select count(*) from " + dbName + "." + dbReadingsName + ";";
	char *errMsg;
	// NOTE(review): errMsg is never inspected or freed; if SQLExec fills it
	// via sqlite3_exec the message would leak - confirm
	int rc = SQLExec(dbHandle, sqlCmd.c_str(), &errMsg);
	if (rc == SQLITE_OK)
	{
		logger->debug("Overflow table %s already exists, not attempting creation", dbReadingsName.c_str());
		return true;
	}

	string createReadings = R"( CREATE TABLE IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"( ( id INTEGER PRIMARY KEY AUTOINCREMENT, asset_code CHARACTER varying(50) NOT NULL, reading JSON NOT NULL DEFAULT '{}', user_ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')), ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')) ); )";

	string createReadingsIdx1 = R"( CREATE INDEX IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"(_ix1 ON )" + dbReadingsName + R"( (asset_code, user_ts desc); )";
	string createReadingsIdx2 = R"( CREATE INDEX IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"(_ix2 ON )" + dbReadingsName + R"( (asset_code); )";
	string createReadingsIdx3 = R"( CREATE INDEX IF NOT EXISTS )" + dbName + "." + dbReadingsName + R"(_ix3 ON )" + dbReadingsName + R"( (user_ts); )";

	logger->info(" Creating table '%s' sql cmd '%s'", dbReadingsName.c_str(), createReadings.c_str());

	rc = SQLExec(dbHandle, createReadings.c_str());
	if (rc != SQLITE_OK)
	{
		raiseError("creating overflow table", sqlite3_errmsg(dbHandle));
		return false;
	}
	rc = SQLExec(dbHandle, createReadingsIdx1.c_str());
	if (rc != SQLITE_OK)
	{
		raiseError("creating overflow table index 1", sqlite3_errmsg(dbHandle));
		return false;
	}
	rc = SQLExec(dbHandle, createReadingsIdx2.c_str());
	if (rc != SQLITE_OK)
	{
		raiseError("creating overflow table index 2", sqlite3_errmsg(dbHandle));
		return false;
	}
	rc = SQLExec(dbHandle, createReadingsIdx3.c_str());
	if (rc != SQLITE_OK)
	{
		raiseError("creating overflow table index 3", sqlite3_errmsg(dbHandle));
		return false;
	}
	return true;
}

/**
 * Evaluates the latest reading table defined in the provided database id looking at sqlite_master, the SQLite repository
 *
 * @param dbHandle	Database connection to use for the operations
 * @param dbId		Database id for which the operation must be executed
 *
 * @return		- a struct containing
 *			lastReadings = the id of the latest reading table defined in the given database id
 *			tableCount = Number of tables given database id in the given database id
 */
ReadingsCatalogue::tyReadingsAvailable ReadingsCatalogue::evaluateLastReadingAvailable(sqlite3 *dbHandle, int dbId) { string dbName; int nCols; int id; char *asset_name; sqlite3_stmt *stmt; int rc; string tableName; tyReadingsAvailable readingsAvailable; Connection *connection; bool connAllocated = false; vector<int> readingsId(getNReadingsAvailable(), 0); ConnectionManager *manager = ConnectionManager::getInstance(); if (dbHandle == NULL) { connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Evaluate last reading available"; connection->setUsage(usage); #endif dbHandle = connection->getDbHandle(); connAllocated = true; } dbName = generateDbName(dbId); string sql_cmd = R"( SELECT name FROM )" + dbName + R"(.sqlite_master WHERE type='table' and name like 'readings_%'; )"; if (sqlite3_prepare_v2(dbHandle,sql_cmd.c_str(),-1, &stmt,NULL) != SQLITE_OK) { raiseError("evaluateLastReadingAvailable", sqlite3_errmsg(dbHandle)); readingsAvailable.lastReadings = -1; readingsAvailable.tableCount = 0; } else { // Iterate over all the rows in the resultSet readingsAvailable.lastReadings = 0; readingsAvailable.tableCount = 0; while ((rc = SQLStep(stmt)) == SQLITE_ROW) { nCols = sqlite3_column_count(stmt); tableName = (char *)sqlite3_column_text(stmt, 0); if (tableName.find_first_of("overflow") == string::npos) { id = extractReadingsIdFromName(tableName); if (id > readingsAvailable.lastReadings) readingsAvailable.lastReadings = id; readingsAvailable.tableCount++; } } Logger::getLogger()->debug("evaluateLastReadingAvailable - tableName '%s' lastReadings %d", tableName.c_str(), readingsAvailable.lastReadings); sqlite3_finalize(stmt); } if (connAllocated) { manager->release(connection); } return (readingsAvailable); } /** * Checks if there is a reading table still available to be used */ bool ReadingsCatalogue::isReadingAvailable() const { if (m_nReadingsAvailable <= 0) return false; else return true; } /** * Tracks the allocation of a reading table * */ void 
ReadingsCatalogue::allocateReadingAvailable() { m_nReadingsAvailable--; } /** * Allocates a reading table to the given asset_code * * @param connection Db connection to be used for the operations * @param asset_code for which the referenced readings table should be idenfied * @return the reading id associated to the provided asset_code */ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getReadingReference(Connection *connection, const char *asset_code) { tyReadingReference ref; sqlite3_stmt *stmt; string sql_cmd; int rc; sqlite3 *dbHandle; string msg; bool success; std::string escaped_asset = std::string(asset_code); std::string target ="\""; std::string replacement ="\"\""; StringReplaceAllEx(escaped_asset, target, replacement); int startReadingsId; tyReadingsAvailable readingsAvailable; ostringstream threadId; threadId << std::this_thread::get_id(); success = true; dbHandle = connection->getDbHandle(); Logger *logger = Logger::getLogger(); auto item = m_AssetReadingCatalogue.find(asset_code); if (item != m_AssetReadingCatalogue.end()) { //# The asset is already allocated to a table ref.tableId = item->second.getTable(); ref.dbId = item->second.getDatabase(); item->second.issue(); } else { Logger::getLogger()->debug("getReadingReference - before lock dbHandle %X threadId '%s'", dbHandle, threadId.str().c_str() ); AttachDbSync *attachSync = AttachDbSync::getInstance(); attachSync->lock(); ReadingsCatalogue::tyReadingReference emptyTableReference = {-1, -1}; std::string emptyAsset = {}; auto item = m_AssetReadingCatalogue.find(asset_code); if (item != m_AssetReadingCatalogue.end()) { ref.tableId = item->second.getTable(); ref.dbId = item->second.getDatabase(); item->second.issue(); } else { if (! isReadingAvailable ()) { // No Reading table available... 
Get empty reading table emptyTableReference = getEmptyReadingTableReference(emptyAsset); if ( !emptyAsset.empty() ) { ref = emptyTableReference; } else { //# Allocate a new block of readings table Logger::getLogger()->debug("Allocating a new db form the preallocated tables. %d preallocated tables available.", m_dbNAvailable); if (m_dbNAvailable > 0) { // DBs already pre-allocated are available m_dbIdCurrent++; m_dbNAvailable--; m_nReadingsAvailable = getNReadingsAllocate(); Logger::getLogger()->debug("Allocate dbIdCurrent %d dbIdLast %d dbNAvailable %d nReadingsAvailable %d ", m_dbIdCurrent, m_dbIdLast, m_dbNAvailable, m_nReadingsAvailable); } else { // There are no pre-allocated databases available // Allocates new DBs int dbId, dbIdStart, dbIdEnd, allocated = 0; dbIdStart = m_dbIdLast +1; dbIdEnd = m_dbIdLast + m_storageConfigCurrent.nDbToAllocate; Logger::getLogger()->debug("getReadingReference - allocate a new db - create new db - dbIdCurrent %d dbIdStart %d dbIdEnd %d", m_dbIdCurrent, dbIdStart, dbIdEnd); for (dbId = dbIdStart; dbId <= dbIdEnd; dbId++) { readingsAvailable = evaluateLastReadingAvailable(dbHandle, dbId - 1); startReadingsId = 1; success = createNewDB(dbHandle, dbId, startReadingsId, NEW_DB_ATTACH_REQUEST); if (success) { Logger::getLogger()->debug("getReadingReference - allocate a new db - create new dbs - dbId %d startReadingsIdOnDB %d", dbId, startReadingsId); allocated++; } else { break; } } if (allocated) { m_dbIdLast += allocated; m_dbIdCurrent++; m_dbNAvailable += (allocated - 1); } } ref.tableId = -1; ref.dbId = -1; } } if (success) // Associate a reading table to the asset { // Associate the asset to the reading_id if (emptyAsset.empty()) { ref.tableId = getMaxReadingsId(m_dbIdCurrent) + 1; ref.dbId = m_dbIdCurrent; } { m_EmptyAssetReadingCatalogue.erase(emptyAsset); m_AssetReadingCatalogue.erase(emptyAsset); auto newMapValue = make_pair(asset_code, TableReference(ref.dbId, ref.tableId)); m_AssetReadingCatalogue.insert(newMapValue); } // 
Allocate the table in the reading catalogue if (emptyAsset.empty()) { sql_cmd = "INSERT INTO " READINGS_DB ".asset_reading_catalogue (table_id, db_id, asset_code) VALUES (" + to_string(ref.tableId) + "," + to_string(ref.dbId) + "," + "\"" + escaped_asset + "\")"; Logger::getLogger()->debug("getReadingReference - allocate a new reading table for the asset '%s' db Id %d readings Id %d ", asset_code, ref.dbId, ref.tableId); } else { sql_cmd = " UPDATE " READINGS_DB ".asset_reading_catalogue SET asset_code ='" + string(escaped_asset) + "'" + " WHERE db_id = " + to_string(ref.dbId) + " AND table_id = " + to_string(ref.tableId) + ";"; Logger::getLogger()->debug("getReadingReference - Use empty table %readings_%d_%d: ",ref.dbId,ref.tableId); } { rc = SQLExec(dbHandle, sql_cmd.c_str()); if (rc != SQLITE_OK) { msg = string(sqlite3_errmsg(dbHandle)) + " asset :" + asset_code + ":"; raiseError("asset_reading_catalogue update", msg.c_str()); } if (emptyAsset.empty()) { allocateReadingAvailable(); } } } else { // Assign to overflow Logger::getLogger()->info("Assign asset %s to the overflow table", asset_code); auto newMapValue = make_pair(asset_code, TableReference(m_nextOverflow, 0)); m_AssetReadingCatalogue.insert(newMapValue); sql_cmd = "INSERT INTO " READINGS_DB ".asset_reading_catalogue (table_id, db_id, asset_code) VALUES ( 0," + to_string(m_nextOverflow) + "," + "\"" + asset_code + "\")"; rc = SQLExec(dbHandle, sql_cmd.c_str()); if (rc != SQLITE_OK) { msg = string(sqlite3_errmsg(dbHandle)) + " asset :" + asset_code + ":"; raiseError("asset_reading_catalogue update", msg.c_str()); } ref.tableId = 0; ref.dbId = m_nextOverflow; if (m_nextOverflow > m_maxOverflowUsed) { m_maxOverflowUsed = m_nextOverflow; } m_nextOverflow++; if (m_nextOverflow > m_dbIdLast) m_nextOverflow = 1; } Logger::getLogger()->debug("Assign: '%s' to %d, %d", asset_code, ref.dbId, ref.tableId); } attachSync->unlock(); } return (ref); } /** * Loads the empty reading table catalogue * */ bool 
ReadingsCatalogue::loadEmptyAssetReadingCatalogue(bool clean) { std::lock_guard<std::mutex> guard(m_emptyReadingTableMutex); sqlite3 *dbHandle; string sql_cmd; sqlite3_stmt *stmt; ConnectionManager *manager = ConnectionManager::getInstance(); if (clean) { m_EmptyAssetReadingCatalogue.clear(); } // Do not populate m_EmptyAssetReadingCatalogue if data is already there if (m_EmptyAssetReadingCatalogue.size()) { return true; } Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Load empty sset reading catalogue"; connection->setUsage(usage); #endif dbHandle = connection->getDbHandle(); time_t issueThreshold = time(0) - 600; // More than 10 minutes since it was last ussed for (auto &item : m_AssetReadingCatalogue) { string asset_name = item.first; // Asset int tableId = item.second.getTable(); // tableId; int dbId = item.second.getDatabase(); // dbId; if (tableId > 0 && item.second.lastIssued() < issueThreshold) { sql_cmd = "SELECT COUNT(*) FROM readings_" + to_string(dbId) + ".readings_" + to_string(dbId) + "_" + to_string(tableId) + " ;"; if (sqlite3_prepare_v2(dbHandle, sql_cmd.c_str(), -1, &stmt, NULL) != SQLITE_OK) { sqlite3_finalize(stmt); continue; } if (SQLStep(stmt) == SQLITE_ROW) { if (sqlite3_column_int(stmt, 0) == 0) { auto newItem = make_pair(tableId,dbId); auto newMapValue = make_pair(asset_name,newItem); m_EmptyAssetReadingCatalogue.insert(newMapValue); } } sqlite3_finalize(stmt); } } manager->release(connection); return true; } /** * Get Empty Reading Table * * @param asset emptyAsset, copies value of asset for which empty table is found * @return the reading id associated to the provided empty table */ ReadingsCatalogue::tyReadingReference ReadingsCatalogue::getEmptyReadingTableReference(std::string& asset) { ReadingsCatalogue::tyReadingReference emptyTableReference = {-1, -1}; if (m_EmptyAssetReadingCatalogue.size() == 0) { loadEmptyAssetReadingCatalogue(); } auto it = m_EmptyAssetReadingCatalogue.begin(); if (it != 
m_EmptyAssetReadingCatalogue.end()) { asset = it->first; emptyTableReference.tableId = it->second.first; emptyTableReference.dbId = it->second.second; } return emptyTableReference; } /** * Retrieve the maximum table id for the provided database id * * @param dbId Database id for which the maximum reading id must be retrieved * @return Maximum readings for the requested database id * */ int ReadingsCatalogue::getMaxReadingsId(int dbId) { int maxId = 0; for (auto &item : m_AssetReadingCatalogue) { if (item.second.getDatabase() == dbId && item.second.getTable() > maxId) maxId = item.second.getTable(); } return (maxId); } /** * Returns the number of readings in use * * @return number of readings in use * */ int ReadingsCatalogue::getReadingsCount() { return (m_AssetReadingCatalogue.size()); } /** * Returns the position in the array of the specific readings Id considering the database id and the table id * * @param dbId Database id for which calculation must be evaluated * @param tableId table id for which calculation must be evaluated * @return Position in the array of the specific readings Id * */ int ReadingsCatalogue::getReadingPosition(int dbId, int tableId) { int position; if ((dbId == 0) && (tableId == 0)) { dbId = m_dbIdCurrent; getMaxReadingsId(dbId); } position = ((dbId - 1) * m_storageConfigCurrent.nReadingsPerDb) + tableId; return (position); } /** * Calculate the number of reading tables associated to the given database id * * @param dbId Database id for which calculation must be evaluated * @return Number of reading tables associated to the given database id * */ int ReadingsCatalogue::getUsedTablesDbId(int dbId) { int count = 0; for (auto &item : m_AssetReadingCatalogue) { if (item.second.getTable() != 0 && item.second.getDatabase() == dbId) count++; } return (count); } /** * Delete the content of all the active readings tables using the provided sql command sqlCmdBase * * @param dbHandle Database connection to use for the operations * @param sqlCmdBase 
Sql command to execute * @param zErrMsg value returned by reference, Error message * @param rowsAffected value returned by reference if != 0, Number of affected rows * @return returns SQLITE_OK if all the sql commands are properly executed * */ int ReadingsCatalogue::purgeAllReadings(sqlite3 *dbHandle, const char *sqlCmdBase, char **zErrMsg, unsigned long *rowsAffected) { string dbReadingsName; string dbName; string sqlCmdTmp; string sqlCmd; bool firstRow; int rc; if (m_AssetReadingCatalogue.empty()) { Logger::getLogger()->debug("purgeAllReadings: no tables defined"); rc = SQLITE_OK; } else { Logger::getLogger()->debug("purgeAllReadings tables defined"); PurgeConfiguration *purgeConfig = PurgeConfiguration::getInstance(); bool exclusions = purgeConfig->hasExclusions(); firstRow = true; if (rowsAffected != nullptr) *rowsAffected = 0; for (auto &item : m_AssetReadingCatalogue) { if (exclusions && purgeConfig->isExcluded(item.first)) { Logger::getLogger()->info("Asset %s excluded from purge", item.first.c_str()); continue; } sqlCmdTmp = sqlCmdBase; dbName = generateDbName(item.second.getDatabase()); dbReadingsName = generateReadingsName(item.second.getDatabase(), item.second.getTable()); StringReplaceAll (sqlCmdTmp, "_assetcode_", item.first); StringReplaceAll (sqlCmdTmp, "_dbname_", dbName); StringReplaceAll (sqlCmdTmp, "_tablename_", dbReadingsName); sqlCmd += sqlCmdTmp; firstRow = false; rc = SQLExec(dbHandle, sqlCmdTmp.c_str(), zErrMsg); Logger::getLogger()->debug("purgeAllReadings: rc:%d, errorMsg:'%s', cmd:'%s'", rc , (*zErrMsg) ? 
(*zErrMsg) : "", sqlCmdTmp.c_str() ); if (rc != SQLITE_OK) { // sqlite3_free(*zErrMsg); // needed by calling method break; } if (rowsAffected != nullptr) { *rowsAffected += (unsigned long ) sqlite3_changes(dbHandle); } } } return(rc); } /** * Constructs a sql command from the given one consisting of a set of UNION ALL commands * considering all the readings tables in use * * @param sqlCmdBase Base Sql command * @param assetCodes Asset codes to evaluate for the operation * @param considerExclusion If True the asset code in the excluded list must not be considered * @return Full sql command * */ string ReadingsCatalogue::sqlConstructMultiDb(string &sqlCmdBase, vector<string> &assetCodes, bool considerExclusion) { string dbReadingsName; string dbName; string sqlCmdTmp; string sqlCmd; string assetCode; bool addTable; bool addedOne; if (m_AssetReadingCatalogue.empty()) { Logger::getLogger()->debug("sqlConstructMultiDb: no tables defined"); sqlCmd = sqlCmdBase; dbReadingsName = generateReadingsName(1, 1); StringReplaceAll (sqlCmd, "_assetcode_", "dummy_asset_code"); StringReplaceAll (sqlCmd, ".assetcode.", "asset_code"); StringReplaceAll (sqlCmd, "_dbname_", READINGS_DB); StringReplaceAll (sqlCmd, "_tablename_", dbReadingsName); } else { Logger::getLogger()->debug("sqlConstructMultiDb: tables defined"); bool firstRow = true; addedOne = false; PurgeConfiguration *purgeConfig = PurgeConfiguration::getInstance(); bool exclusions = purgeConfig->hasExclusions(); for (auto &item : m_AssetReadingCatalogue) { assetCode=item.first; addTable = false; if (considerExclusion && exclusions && purgeConfig->isExcluded(item.first)) { Logger::getLogger()->info("Asset %s excluded from the query on the multiple readings", item.first.c_str()); continue; } // Exclude the overflow table if (item.second.getTable() == 0) { continue; } // Evaluates which tables should be referenced if (assetCodes.empty()) addTable = true; else { if (std::find(assetCodes.begin(), assetCodes.end(), assetCode) != 
assetCodes.end()) addTable = true; } if (addTable) { addedOne = true; sqlCmdTmp = sqlCmdBase; if (!firstRow) { sqlCmd += " UNION ALL "; } dbName = generateDbName(item.second.getDatabase()); dbReadingsName = generateReadingsName(item.second.getDatabase(), item.second.getTable()); std::string target ="\""; std::string replacement ="\"\""; StringReplaceAllEx(assetCode, target, replacement); StringReplaceAll(sqlCmdTmp, "_assetcode_", assetCode); StringReplaceAll (sqlCmdTmp, ".assetcode.", "asset_code"); StringReplaceAll(sqlCmdTmp, "_dbname_", dbName); StringReplaceAll(sqlCmdTmp, "_tablename_", dbReadingsName); sqlCmd += sqlCmdTmp; firstRow = false; } } // Add at least one table eventually a dummy one if (! addedOne) { dbReadingsName = generateReadingsName(1, 1); sqlCmd = sqlCmdBase; StringReplaceAll (sqlCmd, "_assetcode_", "dummy_asset_code"); StringReplaceAll (sqlCmd, "_dbname_", READINGS_DB); StringReplaceAll (sqlCmd, "_tablename_", dbReadingsName); } } return sqlCmd; } /** * Add union all clauses for all the overflow tables based on tempalted SQL that is passed * in and a set of assets codes * * @param sqlCmdBase Base Sql command * @param assetCodes Asset codes to evaluate for the operation * @param considerExclusion If True the asset code in the excluded list must not be considered * @param groupBy Include a group by asset_code in each sub query * @return Full sql command * */ string ReadingsCatalogue::sqlConstructOverflow(string &sqlCmdBase, vector<string> &assetCodes, bool considerExclusion, bool groupBy) { string dbReadingsName; string dbName; string sqlCmdTmp; string sqlCmd; string assetCode; bool addTable; bool addedOne; for (int dbId = 1; dbId <= m_maxOverflowUsed; dbId++) { dbReadingsName = generateReadingsName(dbId, 0); sqlCmdTmp = sqlCmdBase; sqlCmd += " UNION ALL "; dbName = generateDbName(dbId); StringReplaceAll (sqlCmdTmp, ".assetcode.", "asset_code"); StringReplaceAll(sqlCmdTmp, "_dbname_", dbName); StringReplaceAll(sqlCmdTmp, "_tablename_", 
dbReadingsName); sqlCmd += sqlCmdTmp; if (! assetCodes.empty()) { sqlCmd += " WHERE "; bool first = true; for (auto& code : assetCodes) { if (!first) { sqlCmd += " or "; first = false; } sqlCmd += "asset_code = \'"; sqlCmd += code; sqlCmd += "\'"; } } if (groupBy) { sqlCmd += " GROUP By asset_code"; } } return sqlCmd; } /** * Generates a SQLite db alias from the database id * * @param dbId Database id for which the alias must be generated * @return Generated alias * */ string ReadingsCatalogue::generateDbAlias(int dbId) { return (READINGS_DB_NAME_BASE "_" + to_string(dbId)); } /** * Generates a SQLIte database name from the database id * * @param dbId Database id for which the database name must be generated * @return Generated database name * */ string ReadingsCatalogue::generateDbName(int dbId) { return (READINGS_DB_NAME_BASE "_" + to_string(dbId)); } /** * Generates a SQLITE database file name from the database id * * @param dbId Database id for which the database file name must be generated * @return Generated database file name * */ string ReadingsCatalogue::generateDbFileName(int dbId) { return (READINGS_DB_NAME_BASE "_" + to_string (dbId) + ".db"); } /** * Extracts the readings id from the table name * * @param tableName Table name from which the id must be extracted * @return Extracted reading id or -1 on error * */ int ReadingsCatalogue::extractReadingsIdFromName(string tableName) { int dbId; int tableId = -1; string dbIdTableId; try { dbIdTableId = tableName.substr (tableName.find('_') + 1); tableId = stoi(dbIdTableId.substr (dbIdTableId.find('_') + 1)); dbId = stoi(dbIdTableId.substr (0, dbIdTableId.find('_') )); } catch (exception &e) { Logger::getLogger()->fatal("extractReadingsIdFromName: exception on table %s, %s", tableName.c_str(), e.what()); } return tableId; } /** * Extract the database id from the table name * * @param tableName Table name from which the database id must be extracted * @return Extracted database id or -1 on error * */ int 
ReadingsCatalogue::extractDbIdFromName(string tableName) { int dbId = -1; int tableId; string dbIdTableId; try { dbIdTableId = tableName.substr (tableName.find('_') + 1); tableId = stoi(dbIdTableId.substr (dbIdTableId.find('_') + 1)); dbId = stoi(dbIdTableId.substr (0, dbIdTableId.find('_') )); } catch (exception &e) { Logger::getLogger()->fatal("extractReadingsIdFromName: exception on table %s, %s", tableName.c_str(), e.what()); } return dbId; } /** * Generates the name of the reading table from the given table id as: * Prefix + db Id + reading Id. If the tableId is 0 then this is a * reference to the overflow table * * @param dbId Database id to use for the generation of the table name * @param tableId Table id to use for the generation of the table name * @return Generated reading table name * */ string ReadingsCatalogue::generateReadingsName(int dbId, int tableId) { string tableName; if (dbId == -1) dbId = retrieveDbIdFromTableId(tableId); if (tableId == 0) // Overflow table { tableName = READINGS_TABLE "_" + to_string(dbId) + "_overflow"; } else { tableName = READINGS_TABLE "_" + to_string(dbId) + "_" + to_string(tableId); } Logger::getLogger()->debug("%s - dbId %d tableId %d table name '%s' ", __FUNCTION__, dbId, tableId, tableName.c_str()); return tableName; } /** * Retrieves the database id from the table id * * @param tableId Table id for which the database id must be retrieved * @return Retrieved database id for the requested reading id * */ int ReadingsCatalogue::retrieveDbIdFromTableId(int tableId) { int dbId; dbId = -1; for (auto &item : m_AssetReadingCatalogue) { if (item.second.getTable() == tableId) { dbId = item.second.getDatabase(); break; } } return (dbId); } /** * Identifies SQLIte database name from the given table id * * @param tableId Table id for which the database name must be retrieved * @return Retrieved database name for the requested reading id * */ string ReadingsCatalogue::generateDbNameFromTableId(int tableId) { string dbName; for 
(auto &item : m_AssetReadingCatalogue) { if (item.second.getTable() == tableId) { dbName = READINGS_DB_NAME_BASE "_" + to_string(item.second.getDatabase()); break; } } if (dbName == "") dbName = READINGS_DB_NAME_BASE "_1"; return (dbName); } /** * SQLIte wrapper to retry statements when the database is locked * * @param dbHandle Database connection to use for the operations * @param sqlCmdsql The SQL to execute * @param errmsg Returned by reference, error message * @return SQLite constant indicating the outcome of the requested operation, like for example SQLITE_LOCKED, SQLITE_BUSY... * */ int ReadingsCatalogue::SQLExec(sqlite3 *dbHandle, const char *sqlCmd, char **errMsg) { int retries = 0, rc; if (errMsg) { *errMsg = NULL; } Logger::getLogger()->debug("SQLExec: cmd '%s' ", sqlCmd); do { if (errMsg && *errMsg) { sqlite3_free(*errMsg); *errMsg = NULL; } rc = sqlite3_exec(dbHandle, sqlCmd, NULL, NULL, errMsg); Logger::getLogger()->debug("SQLExec: rc %d ", rc); retries++; if (rc == SQLITE_LOCKED || rc == SQLITE_BUSY) { int interval = (retries * RETRY_BACKOFF); usleep(interval); // sleep retries milliseconds if (retries > 5) Logger::getLogger()->info("SQLExec - error '%s' retry %d of %d, rc=%s, DB connection @ %p, slept for %d msecs", sqlite3_errmsg(dbHandle), retries, MAX_RETRIES, (rc==SQLITE_LOCKED)?"SQLITE_LOCKED":"SQLITE_BUSY", this, interval); } } while (retries < MAX_RETRIES && (rc == SQLITE_LOCKED || rc == SQLITE_BUSY)); if (rc == SQLITE_LOCKED) { Logger::getLogger()->error("SQLExec - Database still locked after maximum retries"); } if (rc == SQLITE_BUSY) { Logger::getLogger()->error("SQLExec - Database still busy after maximum retries"); } return rc; } /** * SQLIte wrapper to retry statements when the database error occurs * * @param statement SQLIte statement to execute * @return SQLite constant indicating the outcome of the requested operation, like for example SQLITE_LOCKED, SQLITE_BUSY... 
* */ int ReadingsCatalogue::SQLStep(sqlite3_stmt *statement) { int retries = 0, rc; do { rc = sqlite3_step(statement); retries++; if (rc == SQLITE_LOCKED || rc == SQLITE_BUSY) { int interval = (retries * RETRY_BACKOFF); usleep(interval); // sleep retries milliseconds if (retries > 5) Logger::getLogger()->info("SQLStep: retry %d of %d, rc=%s, DB connection @ %p, slept for %d msecs", retries, MAX_RETRIES, (rc==SQLITE_LOCKED)?"SQLITE_LOCKED":"SQLITE_BUSY", this, interval); } } while (retries < MAX_RETRIES && (rc == SQLITE_LOCKED || rc == SQLITE_BUSY)); if (rc == SQLITE_LOCKED) { Logger::getLogger()->error("Database still locked after maximum retries"); } if (rc == SQLITE_BUSY) { Logger::getLogger()->error("Database still busy after maximum retries"); } return rc; } /** * Remove committed TRANSACTION for the given thread * * @param tid The thread id * that just committed a transaction */ void TransactionBoundary::ClearThreadTransaction(std::thread::id tid) { // Lock m_boundaries map std::lock_guard<std::mutex> lck(m_boundaryLock); // Find thread id auto itr = m_boundaries.find(tid); if (itr != m_boundaries.end()) { // Remove element m_boundaries.erase(itr); #if LOG_TX_BOUNDARIES Logger::getLogger()->debug("ClearThreadTransaction: thread [%ld] cleared TX start %ld", tid, itr->second); #endif } else { Logger::getLogger()->error("ClearThreadTransaction: thread [%ld] not found", tid); } } /** * Set BEGIN of a transaction for a given thread, reading id * * @param tid The thread id * that just started a transaction * @param id The global reading id that starts the transaction */ void TransactionBoundary::SetThreadTransactionStart(std::thread::id tid, unsigned long id) { // Lock m_boundaries map std::lock_guard<std::mutex> lck(m_boundaryLock); // Set id per thread m_boundaries[tid] = id; #if LOG_TX_BOUNDARIES Logger::getLogger()->debug("SetThreadTransactionStart: thread [%ld] set TX start at %ld", tid, id); #endif } /** * Fetch the minimum safe global reading id * among all 
UNCOMMITTED per thread transactions * * @return The safe global reading id to use in * UNION ALL queries as boundary limit */ unsigned long TransactionBoundary::GetMinReadingId() { // Lock m_boundaries map std::lock_guard<std::mutex> lck(m_boundaryLock); unsigned long id = 0; // Get minimum reading id auto it = std::min_element(std::begin(m_boundaries), std::end(m_boundaries), // Lambda compare function [](std::pair<std::thread::id ,unsigned long> i, std::pair<std::thread::id ,unsigned long> j) { return i.second < j.second; }); // Found, set id if (it != m_boundaries.end()) { id = it->second; } #ifdef LOG_TX_BOUNDARIES std::thread::id tid = std::this_thread::get_id(); Logger::getLogger()->debug("GetMinReadingId: thread [%ld] TX min id is %ld", tid, id); #endif return id; } ================================================ FILE: C/plugins/storage/sqlite/include/common.h ================================================ #ifndef _COMMON_CONNECTION_H #define _COMMON_CONNECTION_H #include <sql_buffer.h> #include <iostream> #include <sqlite3.h> #include "rapidjson/document.h" #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include "rapidjson/error/error.h" #include "rapidjson/error/en.h" #include <string> #include <map> #include <stdarg.h> #include <stdlib.h> #include <sstream> #include <logger.h> #include <time.h> #include <unistd.h> #include <chrono> #include <thread> #include <atomic> #include <condition_variable> #include <sys/time.h> #include <connection.h> #define STORAGE_PURGE_RETAIN_ANY 0x0001U #define STORAGE_PURGE_RETAIN_ALL 0x0002U #define STORAGE_PURGE_SIZE 0x0004U static std::map<std::string, std::string> sqliteDateFormat = { {"HH24:MI:SS", F_TIMEH24_S}, {"YYYY-MM-DD HH24:MI:SS.MS", F_DATEH24_MS}, {"YYYY-MM-DD HH24:MI:SS", F_DATEH24_S}, {"YYYY-MM-DD HH24:MI", F_DATEH24_M}, {"YYYY-MM-DD HH24", F_DATEH24_H}, {"", ""} }; #endif ================================================ FILE: C/plugins/storage/sqlite/include/profile.h 
================================================ #ifndef _PROFILE_H #define _PROFILE_H /* * Fledge storage service. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <vector> #include <sys/time.h> #include <logger.h> #define TIME_BUCKETS 20 #define BUCKET_SIZE 5 class ProfileItem { public: ProfileItem(const std::string& reference) : m_reference(reference) { gettimeofday(&m_tvStart, NULL); auto timenow = chrono::system_clock::to_time_t(chrono::system_clock::now()); m_ts = std::string(ctime(&timenow)); m_ts.back() = '\0'; }; ~ProfileItem() {}; void complete() { struct timeval tv; gettimeofday(&tv, NULL); m_duration = (tv.tv_sec - m_tvStart.tv_sec) * 1000 + (tv.tv_usec - m_tvStart.tv_usec) / 1000; }; unsigned long getDuration() { return m_duration; }; const std::string& getReference() const { return m_reference; }; const std::string& getTs() const { return m_ts; }; private: std::string m_reference; struct timeval m_tvStart; unsigned long m_duration; std::string m_ts; }; class QueryProfile { public: QueryProfile(int samples) : m_samples(samples) { time(&m_lastReport); }; void insert(ProfileItem *item) { int b = item->getDuration() / BUCKET_SIZE; if (b >= TIME_BUCKETS) b = TIME_BUCKETS - 1; m_buckets[b]++; if (m_items.size() == m_samples) { int minIndex = 0; unsigned long minDuration = m_items[0]->getDuration(); for (int i = 1; i < m_items.size(); i++) { if (m_items[i]->getDuration() < minDuration) { minDuration = m_items[i]->getDuration(); minIndex = i; } } if (item->getDuration() > minDuration) { delete m_items[minIndex]; m_items[minIndex] = item; } else { delete item; } } else { m_items.push_back(item); } if (time(0) - m_lastReport > 600) { report(); } }; private: int m_samples; std::vector<ProfileItem *> m_items; time_t m_lastReport; unsigned int m_buckets[TIME_BUCKETS]; void report() { Logger *logger = Logger::getLogger(); logger->info("Storage profile report"); logger->info(" < %3d 
mS %d", BUCKET_SIZE, m_buckets[0]); for (int j = 1; j < TIME_BUCKETS - 1; j++) { logger->info("%3d-%3d mS %d", j * BUCKET_SIZE, (j + 1) * BUCKET_SIZE, m_buckets[j]); } logger->info(" > %3d mS %d", BUCKET_SIZE * TIME_BUCKETS, m_buckets[TIME_BUCKETS-1]); for (int i = 0; i < m_items.size(); i++) { logger->info("%ld mS, %s, %s\n", m_items[i]->getDuration(), m_items[i]->getTs().c_str(), m_items[i]->getReference().c_str()); } time(&m_lastReport); }; }; #endif ================================================ FILE: C/plugins/storage/sqlite/plugin.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <sqlite_common.h> #include <connection_manager.h> #include <connection.h> #include <plugin_api.h> #include <stdio.h> #include <stdlib.h> #include <strings.h> #include "sqlite3.h" #include "rapidjson/document.h" #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include <sstream> #include <iostream> #include <string> #include <logger.h> #include <plugin_exception.h> #include <reading_stream.h> #include <config_category.h> #include <readings_catalogue.h> #include <purge_configuration.h> #include <string_utils.h> using namespace std; using namespace rapidjson; /** * The SQLite3 plugin interface */ extern "C" { const char *default_config = QUOTE({ "poolSize" : { "description" : "The number of connections to create in the intial pool of connections", "type" : "integer", "minimum": "1", "maximum": "10", "default" : "5", "displayName" : "Pool Size", "order" : "1" }, "nReadingsPerDb" : { "description" : "The number of unique assets tables to maintain in each database that is created", "type" : "integer", "minimum": "1", "default" : "15", "maximum": "00", "displayName" : "No. Readings per database", "order" : "2" }, "nDbPreallocate" : { "description" : "Number of databases to allocate in advance. 
NOTE: SQLite has a default maximum of 10 attachable databases", "type" : "integer", "default" : "3", "minimum": "1", "maximum" : "10", "displayName" : "No. databases to allocate in advance", "order" : "3" }, "nDbLeftFreeBeforeAllocate" : { "description" : "Allocate new databases when the number of free databases drops below this value", "type" : "integer", "default" : "1", "minimum": "1", "maximum": "10", "displayName" : "Database allocation threshold", "order" : "4" }, "nDbToAllocate" : { "description" : "The number of databases to create whenever the number of available databases drops below the allocation threshold", "type" : "integer", "default" : "2", "minimum" : "1", "maximum" : "10", "displayName" : "Database allocation size", "order" : "5" }, "purgeExclude" : { "description" : "A comma seperated list of assets to exclude from the purge process", "type" : "string", "default" : "", "displayName" : "Purge Exclusions", "order" : "6" }, "vacuumInterval" : { "description" : "The interval between execution of a SQLite vacuum command", "type" : "integer", "minimum" : "1", "default" : "6", "displayName" : "Vacuum Interval", "order" : "7" }, "deployment" : { "description" : "The type of Fledge deployment", "type" : "enumeration", "options" : [ "Small", "Normal", "High Bandwidth" ], "default" : "Normal", "displayName" : "Deployment", "order" : "8" } }); /** * The plugin information structure */ static PLUGIN_INFORMATION info = { "SQLite3", // Name "1.2.0", // Version SP_COMMON|SP_READINGS, // Flags PLUGIN_TYPE_STORAGE, // Type "1.6.0", // Interface version default_config }; /** * Return the information about this plugin */ PLUGIN_INFORMATION *plugin_info() { return &info; } /** * Initialise the plugin, called to get the plugin handle * In the case of SQLLite we also get a pool of connections * to use. 
*/ PLUGIN_HANDLE plugin_init(ConfigCategory *category) { ConnectionManager *manager = ConnectionManager::getInstance(); // Create a copy as the category we are called with has been constructed on the stack ConfigCategory *newCategory = new ConfigCategory(category); manager->setConfiguration(newCategory); STORAGE_CONFIGURATION storageConfig; if (category->itemExists("poolSize")) { storageConfig.poolSize = strtol(category->getValue("poolSize").c_str(), NULL, 10); } manager->growPool(storageConfig.poolSize); if (category->itemExists("nReadingsPerDb")) { storageConfig.nReadingsPerDb = strtol(category->getValue("nReadingsPerDb").c_str(), NULL, 10); } if (category->itemExists("nDbPreallocate")) { storageConfig.nDbPreallocate = strtol(category->getValue("nDbPreallocate").c_str(), NULL, 10); } if (category->itemExists("nDbLeftFreeBeforeAllocate")) { storageConfig.nDbLeftFreeBeforeAllocate = strtol(category->getValue("nDbLeftFreeBeforeAllocate").c_str(), NULL, 10); } if (category->itemExists("nDbToAllocate")) { storageConfig.nDbToAllocate = strtol(category->getValue("nDbToAllocate").c_str(), NULL, 10); } ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); readCat->multipleReadingsInit(storageConfig); if (category->itemExists("purgeExclude")) { string exclusions = category->getValue("purgeExclude"); PurgeConfiguration *purge = PurgeConfiguration::getInstance(); size_t s = 0, pos; while ((pos = exclusions.find_first_of(",", s)) != string::npos) { purge->exclude(StringTrim(exclusions.substr(s, pos - s))); s = pos + 1; } purge->exclude(StringTrim(exclusions.substr(s, pos))); } if (category->itemExists("vacuumInterval")) { manager->setVacuumInterval(strtol(category->getValue("vacuumInterval").c_str(), NULL, 10)); } return manager; } /** * Insert into an arbitrary table */ int plugin_common_insert(PLUGIN_HANDLE handle, char *schema, char *table, char *data) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if 
TRACK_CONNECTION_USER string usage = "Insert into " + string(table); connection->setUsage(usage); #endif int result = connection->insert(std::string(schema), std::string(table), std::string(data)); manager->release(connection); return result; } /** * Retrieve data from an arbitrary table */ const char *plugin_common_retrieve(PLUGIN_HANDLE handle, char *schema, char *table, char *query) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; #if TRACK_CONNECTION_USER string usage = "Retrieve from " + string(table); connection->setUsage(usage); #endif bool rval = connection->retrieve(std::string(schema), std::string(table), std::string(query), results); manager->release(connection); if (rval) { return strdup(results.c_str()); } return NULL; } /** * Update an arbitary table */ int plugin_common_update(PLUGIN_HANDLE handle, char *schema, char *table, char *data) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Update " + string(table); connection->setUsage(usage); #endif int result = connection->update(std::string(schema), std::string(table), std::string(data)); manager->release(connection); return result; } /** * Delete from an arbitrary table */ int plugin_common_delete(PLUGIN_HANDLE handle, char *schema, char *table, char *condition) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Delete from " + string(table); connection->setUsage(usage); #endif int result = connection->deleteRows(std::string(schema), std::string(table), std::string(condition)); manager->release(connection); return result; } /** * Append a sequence of readings to the readings buffer */ int plugin_reading_append(PLUGIN_HANDLE handle, char *readings) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = 
manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Reading append"; connection->setUsage(usage); #endif int result = connection->appendReadings(readings); manager->release(connection); return result;; } /** * Append a stream of readings to the readings buffer */ int plugin_readingStream(PLUGIN_HANDLE handle, ReadingStream **readings, bool commit) { int result = 0; ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Reading Stream"; connection->setUsage(usage); #endif result = connection->readingStream(readings, commit); manager->release(connection); return result;; } /** * Fetch a block of readings from the readings buffer */ char *plugin_reading_fetch(PLUGIN_HANDLE handle, unsigned long id, unsigned int blksize) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string resultSet; #if TRACK_CONNECTION_USER string usage = "Fetch readings"; connection->setUsage(usage); #endif connection->fetchReadings(id, blksize, resultSet); manager->release(connection); return strdup(resultSet.c_str()); } /** * Retrieve some readings from the readings buffer */ char *plugin_reading_retrieve(PLUGIN_HANDLE handle, char *condition) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; #if TRACK_CONNECTION_USER string usage = "Reading retrieve"; connection->setUsage(usage); #endif connection->retrieveReadings(std::string(condition), results); manager->release(connection); return strdup(results.c_str()); } /** * Purge readings from the buffer */ char *plugin_reading_purge(PLUGIN_HANDLE handle, unsigned long param, unsigned int flags, unsigned long sent) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; unsigned long age, size; #if TRACK_CONNECTION_USER string usage = "Purge"; 
connection->setUsage(usage); #endif if (flags & STORAGE_PURGE_SIZE) { (void)connection->purgeReadingsByRows(param, flags, sent, results); } else { age = param; (void)connection->purgeReadings(age, flags, sent, results); } manager->release(connection); return strdup(results.c_str()); } /** * Release a previously returned result set */ void plugin_release(PLUGIN_HANDLE handle, char *results) { (void)handle; free(results); } /** * Return details on the last error that occured. */ PLUGIN_ERROR *plugin_last_error(PLUGIN_HANDLE handle) { ConnectionManager *manager = (ConnectionManager *)handle; return manager->getError(); } /** * Shutdown the plugin */ bool plugin_shutdown(PLUGIN_HANDLE handle) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Shutdown"; connection->setUsage(usage); #endif if (connection->supportsReadings()) { connection->shutdownAppendReadings(); ReadingsCatalogue *readCat = ReadingsCatalogue::getInstance(); readCat->storeGlobalId(); } manager->release(connection); manager->shutdown(); return true; } /** * Create snapshot of a common table * * @param handle The plugin handle * @param table The table to shapshot * @param id The snapshot id * @return -1 on error, >= o on success * * The new created table has the following name: * table_id */ int plugin_create_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Snapshot " + string(table); connection->setUsage(usage); #endif int result = connection->create_table_snapshot(std::string(table), std::string(id)); manager->release(connection); return result; } /** * Load a snapshot of a common table * * @param handle The plugin handle * @param table The table to fill from a given snapshot * @param id The table snapshot id * @return -1 on error, >= o on success */ 
int plugin_load_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Load snapshot " + string(table); connection->setUsage(usage); #endif int result = connection->load_table_snapshot(std::string(table), std::string(id)); manager->release(connection); return result; } /** * Delete a snapshot of a common table * * @param handle The plugin handle * @param table The table which shapshot will be removed * @param id The snapshot id * @return -1 on error, >= o on success * */ int plugin_delete_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Delete snapshot " + string(table); connection->setUsage(usage); #endif int result = connection->delete_table_snapshot(std::string(table), std::string(id)); manager->release(connection); return result; } /** * Get all snapshots of a given common table * * @param handle The plugin handle * @param table The table name * @return List of snapshots (even empty list) or NULL for errors * */ const char* plugin_get_table_snapshots(PLUGIN_HANDLE handle, char *table) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; #if TRACK_CONNECTION_USER string usage = "Get table snapshots" + string(table); connection->setUsage(usage); #endif bool rval = connection->get_table_snapshots(std::string(table), results); manager->release(connection); return rval ? 
strdup(results.c_str()) : NULL; } /** * Update or creats a schema * * @param handle The plugin handle * @param schema The name of the schema * @param definition The schema definition * @return -1 on error, >= 0 on success * */ int plugin_createSchema(PLUGIN_HANDLE handle, char *definition) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Create schema"; connection->setUsage(usage); #endif int result = connection->createSchema(std::string(definition)); manager->release(connection); return result; } /** * Purge given readings asset or all readings from the buffer */ unsigned int plugin_reading_purge_asset(PLUGIN_HANDLE handle, char *asset) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); #if TRACK_CONNECTION_USER string usage = "Purge asset "; connection->setUsage(usage); #endif unsigned int deleted = connection->purgeReadingsAsset(asset); manager->release(connection); return deleted; } }; ================================================ FILE: C/plugins/storage/sqlite/schema/include/schema.h ================================================ #ifndef _SCHEMAS_H #define _SCHEMAS_H /* * Fledge utilities functions for handling stringa * * Copyright (c) 2022 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <sql_buffer.h> #include <string> #include <rapidjson/document.h> #include <sqlite3.h> #include <logger.h> #include <map> #define DDL_BACKOFF 50 // Microseconds to backoff between DDL retries /** * Representation of an extension schema * * Each active schema has an instance of the class that is managed by the * SchemaManager class. This is either created when Fledge restarts and reads the schema * definition from the database or when a service requests an extension schema to be * created. * * The class is responsible for the creation and update of the extension schemas. 
The
 * name, service, version and definition of each schema is written to a control table
 * in the Fledge schema to allow for the versions to be tracked and also to allow the
 * extension schemas to be attached.
 */
class Schema {
	public:
		Schema(const std::string& name, const std::string& service,
				int version, const std::string& definition);
		Schema(sqlite3 *db, const rapidjson::Document& doc);
		~Schema();
		// Accessors for the stored schema metadata
		int		getVersion() { return m_version; };
		std::string	getService() { return m_service; };
		// Apply the differences between the stored definition and the supplied one
		bool		upgrade(sqlite3 *db, const rapidjson::Document& doc,
					const std::string& definition);
		// Attach the schema's database file to the given connection
		bool		attach(sqlite3 *db);
	private:
		std::string	m_name;		// The name of the schema
		std::string	m_service;	// The service that requested the schema
		int		m_version;	// The current version of the schema
		std::string	m_definition;	// The JSON definition of the schema
		int		m_indexNo;	// Counter used to generate unique index names
		std::string	m_schemaPath;	// Path of the schema's database file
		std::map<sqlite3 *, bool>
				m_attached;	// Connections this schema has been attached to
	private:
		bool		createTable(sqlite3 *db, const rapidjson::Value& table);
		bool		createIndex(sqlite3 *db, const std::string& table,
					const rapidjson::Value& index);
		bool		hasTable(const rapidjson::Document& doc, const std::string& table);
		bool		hasColumn(const rapidjson::Document& doc, const std::string& table,
					const std::string& column);
		bool		addTableColumn(sqlite3 *db, const std::string& table,
					const rapidjson::Value& column);
		bool		executeDDL(sqlite3 *db, SQLBuffer& sql);
		// Convenience tests for the presence of a typed member in a JSON value
		bool		hasString(const rapidjson::Value& value, const char *key)
				{
					return (value.HasMember(key) && value[key].IsString());
				};
		bool		hasInt(const rapidjson::Value& value, const char *key)
				{
					return (value.HasMember(key) && value[key].IsInt());
				};
		bool		hasArray(const rapidjson::Value& value, const char *key)
				{
					return (value.HasMember(key) && value[key].IsArray());
				};
		bool		createDatabase();
		void		setDatabasePath();
};

/**
 * The singleton SchemaManager class used to interact with
 * the extension schemas created by various extension services.
 */
class SchemaManager {
	public:
		static SchemaManager	*getInstance();
		// Populate the schema cache from the fledge.service_schema table
		void			load(sqlite3 *db);
		// Create or upgrade a schema from a JSON definition
		bool			create(sqlite3 *db, const std::string& definition);
		// Check a schema exists; attaches it to the connection as a side effect
		bool			exists(sqlite3 *db, const std::string& schema);
	public:
		static SchemaManager	*instance;
	private:
		SchemaManager();
	private:
		Logger			*m_logger;	// The logger to use
		std::map<std::string, Schema *>
					m_schema;	// Known schemas keyed by schema name
		bool			m_loaded;	// True once schemas have been loaded from storage
};
#endif

================================================
FILE: C/plugins/storage/sqlite/schema/schema.cpp
================================================
/*
 * Fledge storage service.
 *
 * Copyright (c) 2022 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <schema.h>
#include "rapidjson/error/error.h"
#include "rapidjson/error/en.h"
#include <unistd.h>
#include <connection.h>

#ifndef DB_CONFIGURATION
#define DB_CONFIGURATION "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -4000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 4096000;"
#endif

using namespace std;
using namespace rapidjson;

SchemaManager *SchemaManager::instance = 0;

/**
 * Fetch the singleton instance of the SchemaManager
 *
 * NOTE(review): instance creation is not thread safe; presumably the first
 * call happens before concurrent access starts — confirm with the service
 * startup path.
 *
 * @return SchemaManager*	The singleton SchemaManager instance
 */
SchemaManager *SchemaManager::getInstance()
{
	if (!instance)
		instance = new SchemaManager();
	return instance;
}

/**
 * Constructor for the singleton SchemaManager
 */
SchemaManager::SchemaManager() : m_loaded(false)
{
	m_logger = Logger::getLogger();
}

/**
 * Load the existing Schema from the table of supported schemas
 *
 * @param db	The database connection to use to load the schema information
 */
void SchemaManager::load(sqlite3 *db)
{
	const char *sql = "SELECT name, service, version, definition FROM fledge.service_schema;";
	sqlite3_stmt *stmt;

	int rc = sqlite3_prepare_v2(db, sql, -1, &stmt, NULL);
	if (rc != SQLITE_OK)
	{
		m_logger->error("Failed to retrieve list of schemas");
		return;
	}
	if (stmt)
	{
		// One row per stored extension schema
		while ((rc = sqlite3_step(stmt)) == SQLITE_ROW)
		{
			string name = (const char
*)sqlite3_column_text(stmt, 0); string service = (const char *)sqlite3_column_text(stmt, 1); int version = sqlite3_column_int(stmt, 2); string definition = (const char *)sqlite3_column_text(stmt, 3); m_schema.insert(pair<string, Schema *>(name, new Schema(name, service, version, definition))); } sqlite3_finalize(stmt); } m_loaded = true; } /** * Create a schema. This may create a completely new schema if it does not * already exist, update an existing schema if the version is different from * the one that already exists or no nothing if the schema exists and the version * of the schema is the same. * * @param db A database connection to use for sqlite3 interactions * @param definition The schema definition * @return bool Returns true if the schema was created, updated or no action is required */ bool SchemaManager::create(sqlite3 *db, const std::string& definition) { Document doc; doc.Parse(definition.c_str()); if (doc.HasParseError()) { m_logger->error("Failed to parse extension schema definition '%s' at %d: %s", GetParseError_En(doc.GetParseError()), doc.GetErrorOffset(), definition.c_str()); return false; } string name; if (doc.HasMember("schema") && doc["schema"].IsString()) { name = doc["schema"].GetString(); } else { m_logger->error("Extension schema is missing the schema name in the definition"); return false; } if (!m_loaded) { load(db); } auto it = m_schema.find(name); if (it == m_schema.end()) { Schema *schema = new Schema(db, doc); m_schema.insert(pair<string, Schema *>(name, schema)); } else { int version; if (doc.HasMember("version") && doc["version"].IsInt()) { version = doc["version"].GetInt(); } else { m_logger->error("Extension schema %s is missing a version number", name.c_str()); return false; } if (it->second->getVersion() != version) { return it->second->upgrade(db, doc, definition); } } return false; } /** * Check if a named schema exists, loading the schemas if need be. 
As * a side effect the schema will be attached to the database connection. * * @param schema The schema to check the existance of * @return bool True if the schema exists */ bool SchemaManager::exists(sqlite3 *db, const string& schema) { if (schema.compare("fledge") == 0) // The fledge schema always exists return true; if (!m_loaded) { load(db); } auto it = m_schema.find(schema); if (it == m_schema.end()) { return false; } return it->second->attach(db); } /** * Constructor for a schema. This is the case in which a schema is beign loaded from * the database of schemas rather than becasue a service has requested a schema to be * created. * * Schemas will be loaded from the database before any schema creation request is * made by the services and will be used to attched the schemas and also to load the * baseline schema such that when the service requests a schema we can see if it already * exists and has the same version number. * * @param name The name of the schema to create * @param service The service requesting the schema * @param version The version of the schema * @param definition The JSON definition of the schema */ Schema::Schema(const string& name, const string& service, int version, const string& definition) : m_name(name), m_service(service), m_version(version), m_definition(definition), m_indexNo(0) { setDatabasePath(); } /** * Constructor for a schema. This is the case when a service has requested a schema * that does not already exist. We must create a new schema from scratch. 
* * @param db SQLite3 database handle * @param doc JSON definition of the schema to create */ Schema::Schema(sqlite3 *db, const rapidjson::Document& doc) : m_indexNo(0) { Logger *logger = Logger::getLogger(); if (hasString(doc, "schema")) { m_name = doc["schema"].GetString(); } else { logger->error("Schema definition is missing a schema name property"); throw runtime_error("Schema missing a name property"); } if (hasString(doc, "service")) { m_service = doc["service"].GetString(); } else { logger->error("Schema definition %s is missing a service property", m_name.c_str()); throw runtime_error("Schema missing a service property"); } if (hasInt(doc, "version")) { m_version = doc["version"].GetInt(); } else { logger->error("Schema definition %s for service %s is missing a version property", m_name.c_str(), m_service.c_str()); throw runtime_error("Schema missing a version property"); } setDatabasePath(); // Create the Schema createDatabase(); // Attach the database file to this database connection attach(db); // Create the tables in the schema if (hasArray(doc, "tables")) { const Value& tables = doc["tables"]; for (auto& table : tables.GetArray()) { createTable(db, table); } } SQLBuffer sql; sql.append("INSERT INTO fledge.service_schema ( name, service, version, definition ) VALUES ("); sql.quote(m_name); sql.append(','); sql.quote(m_service); sql.append(','); sql.append(m_version); sql.append(','); sql.quote(m_definition); sql.append(");"); if (!executeDDL(db, sql)) { logger->error("Failed to add schema to dictionary"); } } /** * Create a table within the extension schema * * @param db SQLite3 database handle * @param table JSON representation of the table definition */ bool Schema::createTable(sqlite3 *db, const rapidjson::Value& table) { Logger *logger = Logger::getLogger(); if (!hasString(table, "name")) { logger->error("Table in schema %s is missing a name definition", m_name.c_str()); return false; } string name = table["name"].GetString(); if (!hasArray(table, 
"columns")) { logger->error("Table %s in schema %s has no columns defined", name.c_str(), m_name.c_str()); return false; } const Value& columns = table["columns"]; SQLBuffer sql; sql.append("CREATE TABLE "); sql.append(m_name); sql.append('.'); sql.append(name); sql.append(" ("); bool first = true; for (auto& column : columns.GetArray()) { if (first) first = false; else sql.append(','); if (!hasString(column, "column")) { logger->error("Table %s in schema %s is missing a column name definition", name.c_str(),m_name.c_str()); return false; } string col = column["column"].GetString(); if (!hasString(column, "type")) { logger->error("Column %s in table %s in schema %s is missing a column name definition", col.c_str(), name.c_str(), m_name.c_str()); return false; } string type = column["type"].GetString(); sql.append(col); if (type.compare("integer") == 0) { sql.append(" INTEGER"); } else if (type.compare("varchar") == 0) { if (!hasInt(column, "size")) { } int size = column["size"].GetInt(); sql.append(" CHARACTER VARYING("); sql.append(size); sql.append(')'); } else if (type.compare("double") == 0) { sql.append(" REAL"); } else if (type.compare("sequence") == 0) { sql.append(" INTEGER AUTOINCREMENT"); } else { logger->error("Type %s is not supported in column %s of table %s in schema %s", type.c_str(), col.c_str(), name.c_str(), m_name.c_str()); return false; } if (column.HasMember("key")) { sql.append(" PRIMARY KEY"); } } sql.append(");"); // Execute the SQL statement if (!executeDDL(db, sql)) { return false; } // Now create any indexes on the table if (hasArray(table, "indexes")) { const Value& indexes = table["indexes"]; for (auto& index : indexes.GetArray()) { if (!createIndex(db, name, index)) { return false; } } } return true; } /** * Create an index on a table * * @param db SQLite database connection * @param table The name of the table the index is created on * @param index JSON defintion of the index */ bool Schema::createIndex(sqlite3 *db, const std::string& 
table, const rapidjson::Value& index) { if (!index.IsArray()) { Logger::getLogger()->error("Malformed index for table %s in schema %s", table.c_str(), m_name.c_str()); return false; } SQLBuffer sql; sql.append("CREATE INDEX "); sql.append(m_name); sql.append("."); sql.append(table); sql.append("_idx"); sql.append(m_indexNo++); sql.append(" ON "); sql.append(table); sql.append('('); bool first = true; for (auto& col : index.GetArray()) { if (col.IsString()) { if (first) first = false; else sql.append(','); sql.append(col.GetString()); } } sql.append(");"); // Execute the SQL statement return executeDDL(db, sql); } /** * Upgrade an existing schema. The upgrade process is limited and will only do the * following operations; add a new table, drop a table, add a new column to a table, * drop a column from a table, add a new index or drop an index. * * @param db The SQLite3 database connection * @param doc The pre-parsed version of the schema definition * @param definition The schema defintion for the new version of the schema as JSON * @param bool True if the upgrade suceeded. 
*/ bool Schema::upgrade(sqlite3 *db, const Document& doc, const string& definition) { Logger *logger = Logger::getLogger(); Document onDisk; onDisk.Parse(m_definition.c_str()); logger->debug("Schema update: %s: Phase 1 - adding any new tables", m_name.c_str()); // Iterate over the new schema tables and find any not in the existing schema if (hasArray(doc, "tables")) { const Value& newTables = doc["tables"]; for (auto& newTable : newTables.GetArray()) { if (hasString(newTable, "name")) { string name = newTable["name"].GetString(); if (!hasTable(onDisk, name)) { logger->debug("Schema Upgrade of %s create table %s", m_name.c_str(), name.c_str()); if (!createTable(db, newTable)) { logger->error("Unable to create new table during schema upgrade for schema %s", m_name.c_str()); return false; } } } } } // Now look for tables that need to be dropped logger->debug("Schema update: %s: Phase 2 - deleting any obsolete tables", m_name.c_str()); if (hasArray(onDisk, "tables")) { const Value& oldTables = onDisk["tables"]; for (auto& oldTable : oldTables.GetArray()) { if (hasString(oldTable, "name")) { string name = oldTable["name"].GetString(); if (!hasTable(doc, name)) { logger->debug("Schema Upgrade of %s drop table %s", m_name.c_str(), name.c_str()); SQLBuffer sql; sql.append("DROP TABLE IF EXISTS "); sql.append(m_name); sql.append('.'); sql.append(name); sql.append(';'); if (!executeDDL(db, sql)) { return false; } } } } } logger->debug("Schema update: %s: Phase 3 - add any new columns to tables", m_name.c_str()); // Iterate over the new schema tables in both and then check for new columns if (hasArray(doc, "tables")) { const Value& newTables = doc["tables"]; for (auto& newTable : newTables.GetArray()) { if (hasString(newTable, "name")) { string name = newTable["name"].GetString(); if (hasTable(onDisk, name)) { if (hasArray(newTable, "columns")) { const Value& columns = newTable["columns"]; for (auto& column : columns.GetArray()) { if (hasString(column, "column")) { string col 
= column["column"].GetString(); if (!hasColumn(onDisk, name, col)) { if (!addTableColumn(db, name, column)) { return false; } } } } } } } } } logger->debug("Schema update: %s: Phase 4 - remove any obsolete columns from tables", m_name.c_str()); // Iterate over the on disk tables looking for tables that exist in the new schema // and then look for columns that are on disk but not in the new schema if (hasArray(onDisk, "tables")) { const Value& oldTables = onDisk["tables"]; for (auto& oldTable : oldTables.GetArray()) { if (hasString(oldTable, "name")) { string name = oldTable["name"].GetString(); if (hasTable(doc, name)) { if (hasArray(oldTable, "columns")) { const Value& columns = oldTable["columns"]; for (auto& column : columns.GetArray()) { if (hasString(column, "column")) { string col = column["column"].GetString(); if (!hasColumn(doc, name, col)) { logger->debug("Schema Upgrade of %s drop column %s from table %s", m_name.c_str(), col.c_str(), name.c_str()); SQLBuffer sql; sql.append("ALTER TABLE "); sql.append(m_name); sql.append('.'); sql.append(name); sql.append(" DROP COLUMN "); sql.append(col); sql.append(';'); if (!executeDDL(db, sql)) { return false; } } } } } } } } } logger->debug("Schema update: %s: Phase 5 - add any new indexes", m_name.c_str()); // Iterate over the new schema tables in both and then check for new columns if (hasArray(doc, "tables")) { const Value& newTables = doc["tables"]; for (auto& newTable : newTables.GetArray()) { if (hasString(newTable, "name")) { string name = newTable["name"].GetString(); if (hasTable(onDisk, name)) { if (hasArray(newTable, "indexes")) { const Value& indexes = newTable["indexes"]; for (auto& index : indexes.GetArray()) { if (hasArray(index, "index")) { // TODO Compare indexes } } } } } } } logger->debug("Schema update: %s: Phase 6 - remove any obsolete indexes", m_name.c_str()); m_version = doc["version"].GetInt(); // Safe as we would not get here if version was missing m_definition = definition; 
logger->debug("Schema update: %s: Phase 7 - update schema table", m_name.c_str()); SQLBuffer sql; sql.append("UPDATE fledge.service_schema SET version = "); sql.append(m_version); sql.append(", definition = "); sql.quote(m_definition); sql.append(" WHERE name = "); sql.quote(m_name); sql.append(" AND service = "); sql.quote(m_service); sql.append(';'); if (!executeDDL(db, sql)) return false; return true; } /** * Add a new column to an existing table within the schema * * @param db The SQLite database handle * @param table The name of the table we are adding the column to * @param column The JSON definition of the column * @return bool True if the column was added to the table */ bool Schema::addTableColumn(sqlite3 *db, const string& table, const Value& column) { Logger *logger = Logger::getLogger(); SQLBuffer sql; sql.append("ALTER TABLE "); sql.append(m_name); sql.append('.'); sql.append(table); sql.append(" ADD COLUMN "); if (!hasString(column, "column")) { logger->error("Schema update %s, missing name for column in table %s", m_name.c_str(), table.c_str()); return false; } string colName = column["colummn"].GetString(); sql.append(colName); if (!hasString(column, "type")) { logger->error("Schema update %s, missing type for column %s in table %s", m_name.c_str(), colName.c_str(), table.c_str()); return false; } string type = column["type"].GetString(); if (type.compare("integer") == 0) { sql.append(" INTEGER"); } else if (type.compare("varchar") == 0) { sql.append(" CHARACTER VARYING("); sql.append(')'); } else if (type.compare("double") == 0) { sql.append(" REAL"); } else if (type.compare("sequence") == 0) { sql.append(" INTEGER AUTOINCREMENT"); } else { logger->error("Update schema type %s is not supported for column %s of table %s in schema %s", type.c_str(), colName.c_str(), table.c_str(), m_name.c_str()); return false; } sql.append(';'); return executeDDL(db, sql); } /** * Execute a DDL statement against the SQLite database * * @param db The SQLite database 
handle * @param sql The SQLBuffer to execute * @return bool True if the statement succeeded */ bool Schema::executeDDL(sqlite3 *db, SQLBuffer& sql) { const char *ddl = sql.coalesce(); Logger *logger = Logger::getLogger(); logger->debug("Schema %s: Execute DDL %s", m_name.c_str(), ddl); char *errMsg = NULL; int rc, retries = 0; if (((rc = sqlite3_exec(db, ddl, NULL, NULL, &errMsg)) == SQLITE_BUSY || rc == SQLITE_LOCKED) && ++retries < 10) { int interval = retries * DDL_BACKOFF; usleep(interval); } if (rc != SQLITE_OK) { logger->error("Schema %s, failed to execute DDL %s, %s", m_name.c_str(), ddl, errMsg ? errMsg : "no reason available"); if (errMsg) { sqlite3_free(errMsg); } return false; } return true; } /** * Look in the JSON definition of a schema and check for the existance of a table * * @param doc The JSON document that defines the schema * @param table The name of the table name to look for * @return bool True if the table exists in the schema */ bool Schema::hasTable(const Document& doc, const string& tableName) { if (!hasArray(doc, "tables")) { return false; } const Value& tables = doc["tables"]; for (auto& table : tables.GetArray()) { if (hasString(table, "name")) { string name = table["name"].GetString(); if (name.compare(tableName) == 0) { return true; } } } return false; } /** * Look in the JSON definition of a schema and check for the existance of a column within a table * * @param doc The JSON document that defines the schema * @param tableName The name of the table name to look for * @param columnName The name of the column name to look for * @return bool True if the table exists in the schema */ bool Schema::hasColumn(const Document& doc, const string& tableName, const string& columnName) { if (!hasArray(doc, "tables")) { return false; } const Value& tables = doc["tables"]; for (auto& table : tables.GetArray()) { if (hasString(table, "name")) { string name = table["name"].GetString(); if (name.compare(tableName) == 0) { if (hasArray(table, 
"columns")) { const Value& columns = table["columns"]; for (auto& column : columns.GetArray()) { if (hasString(column, "column")) { string col = column["column"].GetString(); if (col.compare(columnName) == 0) { return true; } } } } } } } return false; } /** * Setup the path of the schema database file */ void Schema::setDatabasePath() { char *data = getenv("FLEDGE_DATA"); if (!data) { m_schemaPath = getenv("FLEDGE_ROOT"); m_schemaPath +="/data"; } else { m_schemaPath = data; } m_schemaPath += "/"; m_schemaPath += m_name; m_schemaPath += ".db"; } /** * Create the SQLite database and enable the WAL mode for the database * * @return bool Returns true on success */ bool Schema::createDatabase() { sqlite3 *dbHandle; int rc = sqlite3_open(m_schemaPath.c_str(), &dbHandle); if (rc != SQLITE_OK) { Logger::getLogger()->error("Failed to create database for schema %s", m_name.c_str()); return false; } if ((rc = sqlite3_exec(dbHandle, DB_CONFIGURATION, NULL, NULL, NULL)) != SQLITE_OK) { Logger::getLogger()->error("Unable to set database configuration for schema %s", m_name.c_str()); return false; } sqlite3_close(dbHandle); return true; } /** * Attach the schema to the database handle if not already attached * * @param db The database handle to attach the schema to * @return bool True if the schema was attached */ bool Schema::attach(sqlite3 *db) { if (m_attached.find(db) != m_attached.end()) { // Already attached return true; } SQLBuffer sql; sql.append("ATTACH DATABASE '"); sql.append(m_schemaPath); sql.append("' AS "); sql.append(m_name); sql.append(';'); if (!executeDDL(db, sql)) { return false; } m_attached[db] = true; return true; } ================================================ FILE: C/plugins/storage/sqlitelb/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(sqlitelb) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(STORAGE_COMMON_LIB storage-common-lib) # Path of 
compiled sqlite3 file: /usr/local/bin set(FLEDGE_SQLITE3_LIBS "/usr/local/bin" CACHE INTERNAL "") # Find source files file(GLOB SOURCES ./common/*.cpp ../sqlite/schema/*.cpp *.cpp) # Include header files include_directories(./include) include_directories(./common/include) include_directories(../sqlite/schema/include) include_directories(../sqlite/common/include) include_directories(../../../common/include) include_directories(../../../services/common/include) include_directories(../common/include) include_directories(../../../thirdparty/rapidjson/include) link_directories(${PROJECT_BINARY_DIR}/../../../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) target_link_libraries(${PROJECT_NAME} ${STORAGE_COMMON_LIB}) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) # Check Sqlite3 required version set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}") find_package(sqlite3) # Commented out as it is the persistent storage #add_definitions(-DSQLITE_SPLIT_READINGS=1) add_definitions(-DPLUGIN_LOG_NAME="SQLite 3 lb") # Use static SQLite3 library if(EXISTS ${FLEDGE_SQLITE3_LIBS}) include_directories(${FLEDGE_SQLITE3_LIBS}) target_link_libraries(${PROJECT_NAME} -L"${FLEDGE_SQLITE3_LIBS}/.libs" -lsqlite3) else() target_link_libraries(${PROJECT_NAME} -lsqlite3) endif() # Install SQLite3 command line with static library if(EXISTS ${FLEDGE_SQLITE3_LIBS}) install(PROGRAMS ${FLEDGE_SQLITE3_LIBS}/sqlite3 DESTINATION "fledge/plugins/storage/${PROJECT_NAME}") endif() # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/plugins/storage/${PROJECT_NAME}) # Install init.sql install(FILES ${CMAKE_SOURCE_DIR}/scripts/plugins/storage/${PROJECT_NAME}/init.sql DESTINATION fledge/plugins/storage/${PROJECT_NAME}) install(FILES ${CMAKE_SOURCE_DIR}/scripts/plugins/storage/${PROJECT_NAME}/init_readings.sql DESTINATION fledge/plugins/storage/${PROJECT_NAME}) ================================================ FILE: 
C/plugins/storage/sqlitelb/Findsqlite3.cmake ================================================ # This CMake file locates the SQLite3 development libraries # # The following variables are set: # SQLITE_FOUND - If the SQLite library was found # SQLITE_LIBRARIES - Path to the static library # SQLITE_INCLUDE_DIR - Path to SQLite headers # SQLITE_VERSION - Library version set(SQLITE_MIN_VERSION "3.11.0") # Check wether path of compiled libsqlite3.a and .h files exists if (EXISTS ${FLEDGE_SQLITE3_LIBS}) find_path(SQLITE_INCLUDE_DIR sqlite3.h PATHS ${FLEDGE_SQLITE3_LIBS}) find_library(SQLITE_LIBRARIES NAMES libsqlite3.a PATHS "${FLEDGE_SQLITE3_LIBS}/.libs") else() find_path(SQLITE_INCLUDE_DIR sqlite3.h) find_library(SQLITE_LIBRARIES NAMES libsqlite3.so) endif() if (SQLITE_INCLUDE_DIR AND SQLITE_LIBRARIES) execute_process(COMMAND grep ".*#define.*SQLITE_VERSION " ${SQLITE_INCLUDE_DIR}/sqlite3.h COMMAND sed "s/.*\"\\(.*\\)\".*/\\1/" OUTPUT_VARIABLE SQLITE_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE) if ("${SQLITE_VERSION}" VERSION_LESS "${SQLITE_MIN_VERSION}") message(FATAL_ERROR "SQLite3 version >= ${SQLITE_MIN_VERSION} required, found version ${SQLITE_VERSION}") else() message(STATUS "Found SQLite version ${SQLITE_VERSION}: ${SQLITE_LIBRARIES}") set(SQLITE_FOUND TRUE) endif() else() message(FATAL_ERROR "Could not find SQLite") endif() ================================================ FILE: C/plugins/storage/sqlitelb/common/connection.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2018 OSIsoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <connection.h> #include <connection_manager.h> #include <sqlite_common.h> #include <utils.h> #ifndef MEMORY_READING_PLUGIN #include <schema.h> #endif /* * Control the way purge deletes readings. 
The block size sets a limit as to how many rows * get deleted in each call, whilst the sleep interval controls how long the thread sleeps * between deletes. The idea is to not keep the database locked too long and allow other threads * to have access to the database between blocks. */ #define PURGE_SLEEP_MS 500 #define PURGE_DELETE_BLOCK_SIZE 20 #define TARGET_PURGE_BLOCK_DEL_TIME (70*1000) // 70 msec #define PURGE_BLOCK_SZ_GRANULARITY 5 // 5 rows #define MIN_PURGE_DELETE_BLOCK_SIZE 20 #define MAX_PURGE_DELETE_BLOCK_SIZE 1500 #define RECALC_PURGE_BLOCK_SIZE_NUM_BLOCKS 30 // recalculate purge block size after every 30 blocks #define PURGE_SLOWDOWN_AFTER_BLOCKS 5 #define PURGE_SLOWDOWN_SLEEP_MS 500 /** * SQLite3 storage plugin for Fledge */ using namespace std; using namespace rapidjson; #define CONNECT_ERROR_THRESHOLD 5*60 // 5 minutes /* * The following allows for conditional inclusion of code that tracks the top queries * run by the storage plugin and the number of times a particular statement has to * be retried because of the database being busy./ */ #define DO_PROFILE 0 #define DO_PROFILE_RETRIES 0 #if DO_PROFILE #include <profile.h> #define TOP_N_STATEMENTS 10 // Number of statements to report in top n #define RETRY_REPORT_THRESHOLD 1000 // Report retry statistics every X calls QueryProfile profiler(TOP_N_STATEMENTS); unsigned long retryStats[MAX_RETRIES] = { 0,0,0,0,0,0,0,0,0,0 }; unsigned long numStatements = 0; int maxQueue = 0; #endif static std::atomic<int> m_waiting(0); static std::atomic<int> m_writeAccessOngoing(0); static std::mutex db_mutex; static std::condition_variable db_cv; static int purgeBlockSize = PURGE_DELETE_BLOCK_SIZE; #define START_TIME std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); #define END_TIME std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); \ auto usecs = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); static 
time_t connectErrorTime = 0; /** * This SQLIte3 query callback returns a formatted date * by SELECT strftime('format', column, 'locatime') * * @param data Output parameter to update with new datetime * @param nCols The number of columns or the row * @param colValues The column values * @param colNames The column names * @return 0 on success, 1 otherwise */ int dateCallback(void *data, int nCols, char **colValues, char **colNames) { if (colValues[0] != NULL) { memcpy((char *)data, colValues[0], strlen(colValues[0])); // OK return 0; } else { // Failure return 1; } } /** * Retrieves the current datetime (now ()) from SQlite * * @param Now Output parameter - now () * @return True, operations succeded * */ bool Connection::getNow(string& Now) { bool retCode; char* zErrMsg = NULL; char nowDate[100] = ""; string nowSqlCMD = "SELECT " SQLITE3_NOW_READING; int rc = SQLexec(dbHandle, "now", nowSqlCMD.c_str(), dateCallback, nowDate, &zErrMsg); if (rc == SQLITE_OK ) { Now = nowDate; retCode = true; } else { Logger::getLogger()->error("SELECT NOW() error :%s:", nowSqlCMD.c_str(), zErrMsg); sqlite3_free(zErrMsg); Now = ""; retCode = false; } return retCode; } //### #########################################################################################: /** * Apply Fledge default datetime formatting * to a detected DATETIME datatype column * * @param pStmt Current SQLite3 result set * @param i Current column index * @param Output parameter for new date * @return True is format has been applied, * False otherwise */ bool Connection::applyColumnDateTimeFormat(sqlite3_stmt *pStmt, int i, string& newDate) { bool apply_format = false; string formatStmt = {}; if (sqlite3_column_database_name(pStmt, i) != NULL && sqlite3_column_table_name(pStmt, i) != NULL) { if ((strcmp(sqlite3_column_origin_name(pStmt, i), "user_ts") == 0) && (strcmp(sqlite3_column_table_name(pStmt, i), "readings") == 0) && (strlen((char *) sqlite3_column_text(pStmt, i)) == 32)) { // Extract milliseconds and 
microseconds for the user_ts field of the readings table formatStmt = string("SELECT strftime('"); formatStmt += string(F_DATEH24_SEC); formatStmt += "', '" + string((char *) sqlite3_column_text(pStmt, i)); formatStmt += "')"; formatStmt += " || substr('" + string((char *) sqlite3_column_text(pStmt, i)); formatStmt += "', instr('" + string((char *) sqlite3_column_text(pStmt, i)); formatStmt += "', '.'), 7)"; apply_format = true; } else { /** * Handle here possible unformatted DATETIME column type * If (column_name == column_original_name) AND * (sqlite3_column_table_name() == "DATETIME") * we assume the column has not been formatted * by any datetime() or strftime() SQLite function. * Thus we apply default FLEDGE formatting: * "%Y-%m-%d %H:%M:%f" */ if (sqlite3_column_database_name(pStmt, i) != NULL && sqlite3_column_table_name(pStmt, i) != NULL && (strcmp(sqlite3_column_origin_name(pStmt, i), sqlite3_column_name(pStmt, i)) == 0)) { const char *pzDataType; int retType = sqlite3_table_column_metadata(dbHandle, sqlite3_column_database_name(pStmt, i), sqlite3_column_table_name(pStmt, i), sqlite3_column_name(pStmt, i), &pzDataType, NULL, NULL, NULL, NULL); // Check whether to Apply dateformat if (pzDataType != NULL && retType == SQLITE_OK && strcmp(pzDataType, SQLITE3_FLEDGE_DATETIME_TYPE) == 0 && strcmp(sqlite3_column_origin_name(pStmt, i), sqlite3_column_name(pStmt, i)) == 0) { // Column metadata found and column datatype is "pzDataType" formatStmt = string("SELECT strftime('"); formatStmt += string(F_DATEH24_MS); formatStmt += "', '" + string((char *) sqlite3_column_text(pStmt, i)); formatStmt += "')"; apply_format = true; } else { // Format not done // Just log the error if present if (retType != SQLITE_OK) { Logger::getLogger()->error("SQLite3 failed " \ "to call sqlite3_table_column_metadata() " \ "for column '%s'", sqlite3_column_name(pStmt, i)); } } } } } if (apply_format) { char* zErrMsg = NULL; // New formatted data char formattedData[100] = ""; // Exec the 
format SQL int rc = SQLexec(dbHandle, "date", formatStmt.c_str(), dateCallback, formattedData, &zErrMsg); if (rc == SQLITE_OK ) { // Use new formatted datetime value newDate.assign(formattedData); return true; } else { Logger::getLogger()->error("SELECT dateformat '%s': error %s", formatStmt.c_str(), zErrMsg); sqlite3_free(zErrMsg); } } return false; } /** * Apply the specified date format * using the available formats in SQLite3 * for a specific column * * If the requested format is not available * the input column is used as is. * Additionally milliseconds could be rounded * upon request. * The routine return false if date format is not * found and the caller might decide to raise an error * or use the non formatted value * * @param inFormat Input date format from application * @param colName The column name to format * @param outFormat The formatted column * @return True if format has been applied or * false id no format is in use. */ bool applyColumnDateFormat(const string& inFormat, const string& colName, string& outFormat, bool roundMs) { bool retCode; // Get format, if any, from the supported formats map const string format = sqliteDateFormat[inFormat]; if (!format.empty()) { // Apply found format via SQLite3 strftime() outFormat.append("strftime('"); outFormat.append(format); outFormat.append("', "); // Check whether we have to round milliseconds if (roundMs == true && format.back() == 'f') { outFormat.append("cast(round((julianday("); outFormat.append(colName); outFormat.append(") - 2440587.5)*86400 -0.00005, 3) AS FLOAT), 'unixepoch'"); } else { outFormat.append(colName); } outFormat.append(" )"); retCode = true; } else { // Use column as is outFormat.append(colName); retCode = false; } return retCode; } /** * Apply the specified date format * using the available formats in SQLite3 * for a specific column * * If the requested format is not available * the input column is used as is. * Additionally milliseconds could be rounded * upon request. 
* The routine return false if date format is not * found and the caller might decide to raise an error * or use the non formatted value * * @param inFormat Input date format from application * @param colName The column name to format * @param outFormat The formatted column * @return True if format has been applied or * false id no format is in use. */ bool applyColumnDateFormatLocaltime(const string& inFormat, const string& colName, string& outFormat, bool roundMs) { bool retCode; // Get format, if any, from the supported formats map const string format = sqliteDateFormat[inFormat]; if (!format.empty()) { // Apply found format via SQLite3 strftime() outFormat.append("strftime('"); outFormat.append(format); outFormat.append("', "); // Check whether we have to round milliseconds if (roundMs == true && format.back() == 'f') { outFormat.append("cast(round((julianday("); outFormat.append(colName); outFormat.append(") - 2440587.5)*86400 -0.00005, 3) AS FLOAT), 'unixepoch'"); } else { outFormat.append(colName); } outFormat.append(", 'localtime')"); retCode = true; } else { // Use column as is outFormat.append(colName); retCode = false; } return retCode; } /** * Apply the specified date format * using the available formats in SQLite3 * * @param inFormat Input date format from application * @param outFormat The formatted column * @return True if format has been applied or * false */ bool applyDateFormat(const string& inFormat, string& outFormat) { bool retCode; // Get format, if any, from the supported formats map const string format = sqliteDateFormat[inFormat]; if (!format.empty()) { // Apply found format via SQLite3 strftime() outFormat.append("strftime('"); outFormat.append(format); outFormat.append("', "); return true; } else { return false; } } #ifndef SQLITE_SPLIT_READINGS /** * Create a SQLite3 database connection */ Connection::Connection() : m_purgeBlockSize(10000) { string dbPath, dbPathReadings; const char *defaultConnection = getenv("DEFAULT_SQLITE_DB_FILE"); 
const char *defaultReadingsConnection = getenv("DEFAULT_SQLITE_DB_READINGS_FILE"); m_logSQL = false; m_queuing = 0; m_streamOpenTransaction = true; if (defaultConnection == NULL) { // Set DB base path dbPath = getDataDir(); // Add the filename dbPath += _DB_NAME; } else { dbPath = defaultConnection; } if (defaultReadingsConnection == NULL) { // Set DB base path dbPathReadings = getDataDir(); // Add the filename dbPathReadings += READINGS_DB_FILE_NAME; } else { dbPathReadings = defaultReadingsConnection; } // Allow usage of URI for filename sqlite3_config(SQLITE_CONFIG_URI, 1); Logger *logger = Logger::getLogger(); /** * Make a connection to the database * and check backend connection was successfully made * Note: * we assume the database already exists, so the flag * SQLITE_OPEN_CREATE is not added in sqlite3_open_v2 call */ if (sqlite3_open_v2(dbPath.c_str(), &dbHandle, SQLITE_OPEN_READWRITE | SQLITE_OPEN_NOMUTEX, NULL) != SQLITE_OK) { const char* dbErrMsg = sqlite3_errmsg(dbHandle); const char* errMsg = "Failed to open the SQLite3 database"; Logger::getLogger()->error("%s '%s': %s", dbErrMsg, dbPath.c_str(), dbErrMsg); connectErrorTime = time(0); raiseError("Connection", "%s '%s': '%s'", dbErrMsg, dbPath.c_str(), dbErrMsg); sqlite3_close_v2(dbHandle); dbHandle = NULL; } else { int rc; char *zErrMsg = NULL; string dbConfiguration = "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -4000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 4096000;"; // Enable the WAL for the fledge DB rc = sqlite3_exec(dbHandle, dbConfiguration.c_str(), NULL, NULL, &zErrMsg); if (rc != SQLITE_OK) { string errMsg = "Failed to set WAL from the fledge DB - " + dbConfiguration; Logger::getLogger()->error("%s : error %s", dbConfiguration.c_str(), zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); } /* * Build the ATTACH DATABASE command in order to get * 'fledge.' 
prefix in all SQL queries */ SQLBuffer attachDb; attachDb.append("ATTACH DATABASE '"); attachDb.append(dbPath + "' AS fledge;"); const char *sqlStmt = attachDb.coalesce(); zErrMsg = NULL; // Exec the statement rc = SQLexec(dbHandle, "database", sqlStmt, NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) { const char* errMsg = "Failed to attach 'fledge' database in"; Logger::getLogger()->error("%s '%s': error %s", errMsg, sqlStmt, zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); sqlite3_close_v2(dbHandle); } else { Logger::getLogger()->info("Connected to SQLite3 database: %s", dbPath.c_str()); } //Release sqlStmt buffer delete[] sqlStmt; bool initialiseReadings = false; if (access(dbPathReadings.c_str(), R_OK) == -1) { sqlite3 *dbHandle; // Readings do not exist so set flag to initialise rc = sqlite3_open(dbPathReadings.c_str(), &dbHandle); if(rc != SQLITE_OK) { } else { // Enables the WAL feature rc = sqlite3_exec(dbHandle, DB_CONFIGURATION, NULL, NULL, NULL); } sqlite3_close(dbHandle); initialiseReadings = true; } // Attach readings database SQLBuffer attachReadingsDb; attachReadingsDb.append("ATTACH DATABASE '"); attachReadingsDb.append(dbPathReadings + "' AS readings;"); const char *sqlReadingsStmt = attachReadingsDb.coalesce(); // Exec the statement rc = SQLexec(dbHandle, "database", sqlReadingsStmt, NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) { const char* errMsg = "Failed to attach 'readings' database in"; Logger::getLogger()->error("%s '%s': error %s", errMsg, sqlReadingsStmt, zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); sqlite3_close_v2(dbHandle); } else { Logger::getLogger()->info("Connected to SQLite3 database: %s", dbPath.c_str()); } //Release sqlStmt buffer delete[] sqlReadingsStmt; if (initialiseReadings) { // Would really like to run an external script here, but until we have that // worked out we have the SQL needed to create the table and indexes // Need to initialise the readings SQLBuffer 
initReadings; initReadings.append("CREATE TABLE readings.readings ("); initReadings.append("id INTEGER PRIMARY KEY AUTOINCREMENT,"); initReadings.append("asset_code character varying(50) NOT NULL,"); initReadings.append("reading JSON NOT NULL DEFAULT '{}',"); initReadings.append("user_ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW')),"); initReadings.append("ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW'))"); initReadings.append(");"); const char *sqlReadingsStmt = initReadings.coalesce(); // Exec the statement zErrMsg = NULL; rc = SQLexec(dbHandle, "readings creation", sqlReadingsStmt, NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) { const char* errMsg = "Failed to create 'readings' table, "; Logger::getLogger()->error("%s '%s': error %s", errMsg, sqlReadingsStmt, zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); sqlite3_close_v2(dbHandle); } else { Logger::getLogger()->info("Initialised readings database"); } //Release sqlStmt buffer delete[] sqlReadingsStmt; SQLBuffer index1; index1.append("CREATE INDEX readings.fki_readings_fk1 ON readings (asset_code, user_ts desc);"); const char *sqlIndex1Stmt = index1.coalesce(); // Exec the statement zErrMsg = NULL; rc = SQLexec(dbHandle, "readings creation", sqlIndex1Stmt, NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) { const char* errMsg = "Failed to create 'readings' index, "; Logger::getLogger()->error("%s '%s': error %s", errMsg, sqlIndex1Stmt, zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); sqlite3_close_v2(dbHandle); } else { Logger::getLogger()->info("Initialised readings database"); } //Release sqlStmt buffer delete[] sqlIndex1Stmt; SQLBuffer index2; index2.append("CREATE INDEX readings.readings_ix2 ON readings (asset_code);"); const char *sqlIndex2Stmt = index2.coalesce(); // Exec the statement zErrMsg = NULL; rc = SQLexec(dbHandle, "readings creation", sqlIndex2Stmt, NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) { 
const char* errMsg = "Failed to create 'readings' index, "; Logger::getLogger()->error("%s '%s': error %s", errMsg, sqlIndex2Stmt, zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); sqlite3_close_v2(dbHandle); } else { Logger::getLogger()->info("Initialised readings database"); } //Release sqlStmt buffer delete[] sqlIndex2Stmt; SQLBuffer index3; index3.append("CREATE INDEX readings.readings_ix3 ON readings (user_ts);"); const char *sqlIndex3Stmt = index3.coalesce(); // Exec the statement zErrMsg = NULL; rc = SQLexec(dbHandle, "readings creation", sqlIndex3Stmt, NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) { const char* errMsg = "Failed to create 'readings' index, "; Logger::getLogger()->error("%s '%s': error %s", errMsg, sqlIndex3Stmt, zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); sqlite3_close_v2(dbHandle); } else { Logger::getLogger()->info("Initialised readings database"); } //Release sqlStmt buffer delete[] sqlIndex3Stmt; } // Enable the WAL for the readings DB rc = sqlite3_exec(dbHandle, dbConfiguration.c_str(),NULL, NULL, &zErrMsg); if (rc != SQLITE_OK) { string errMsg = "Failed to set WAL from the readings DB - " + dbConfiguration; Logger::getLogger()->error("%s : error %s", errMsg.c_str(), zErrMsg); connectErrorTime = time(0); sqlite3_free(zErrMsg); } } m_schemaManager = SchemaManager::getInstance(); } #endif /** * Destructor for the database connection. 
* Close the connection to SQLite3 db */ Connection::~Connection() { sqlite3_close_v2(dbHandle); } /** * Enable or disable the tracing of SQL statements * * @param flag Desired state of the SQL trace flag */ void Connection::setTrace(bool flag) { m_logSQL = flag; } /** * Map a SQLite3 result set to a string version of a JSON document * * @param res Sqlite3 result set * @param resultSet Output Json as string * @return SQLite3 result code of sqlite3_step(res) * */ int Connection::mapResultSet(void* res, string& resultSet) { // Cast to SQLite3 result set sqlite3_stmt* pStmt = (sqlite3_stmt *)res; // JSON generic document Document doc; // SQLite3 return code int rc; // Number of returned rows, number of columns unsigned long nRows = 0, nCols = 0; // Create the JSON document doc.SetObject(); // Get document allocator Document::AllocatorType& allocator = doc.GetAllocator(); // Create the array for returned rows Value rows(kArrayType); // Rows counter, set it to 0 now Value count; count.SetInt(0); // Iterate over all the rows in the resultSet while ((rc = SQLstep(pStmt)) == SQLITE_ROW) { // Get number of columns for current row nCols = sqlite3_column_count(pStmt); // Create the 'row' object Value row(kObjectType); // Build the row with all fields for (int i = 0; i < nCols; i++) { // JSON document for the current row Document d; // Set object name as the column name Value name(sqlite3_column_name(pStmt, i), allocator); // Get the "TEXT" value of the column value char* str = (char *)sqlite3_column_text(pStmt, i); // Check the column value datatype switch (sqlite3_column_type(pStmt, i)) { case (SQLITE_NULL): { row.AddMember(name, "", allocator); break; } case (SQLITE3_TEXT): { /** * Handle here possible unformatted DATETIME column type */ string newDate; if (applyColumnDateTimeFormat(pStmt, i, newDate)) { // Use new formatted datetime value str = (char *)newDate.c_str(); } Value value; if (!d.Parse(str).HasParseError()) { if (d.IsNumber()) { // Set string value = Value(str, 
allocator); } else { // JSON parsing ok, use the document // if string value is not "null", "true", "false" if (strcmp(str, "null") != 0 && strcmp(str, "true") != 0 && strcmp(str, "false") != 0) { value = Value(d, allocator); } else { // Use (char *) value for "null", "true", "false" value = Value(str, allocator); } } } else { // Use (char *) value value = Value(str, allocator); } // Add name & value to the current row row.AddMember(name, value, allocator); break; } case (SQLITE_INTEGER): { int64_t intVal = atol(str); // Add name & value to the current row row.AddMember(name, intVal, allocator); break; } case (SQLITE_FLOAT): { double dblVal = atof(str); // Add name & value to the current row row.AddMember(name, dblVal, allocator); break; } default: { // Default: use (char *) value Value value(str != NULL ? str : "", allocator); // Add name & value to the current row row.AddMember(name, value, allocator); break; } } } // All fields added: increase row counter nRows++; // Add the current row to the all rows object rows.PushBack(row, allocator); } // All rows added: update rows count count.SetInt(nRows); // Add 'rows' and 'count' to the final JSON document doc.AddMember("count", count, allocator); doc.AddMember("rows", rows, allocator); /* Write out the JSON document we created */ StringBuffer buffer; Writer<StringBuffer> writer(buffer); doc.Accept(writer); // Set the result as a CPP string resultSet = buffer.GetString(); // Return SQLite3 ret code return rc; } /** * This SQLIte3 query callback just returns the number of rows seen * by a SELECT statement in the 'data' parameter * * @param data Output parameter to update with number of rows * @param nCols The number of columns or the row * @param colValues The column values * @param colNames The column names * @return 0 on success, 1 otherwise */ int selectCallback(void *data, int nCols, char **colValues, char **colNames) { int *nRows = (int *)data; // Increment the number of rows seen *nRows++; // Set OK return 0; } 
/** * This SQLIte3 query count callback just returns the number of rows * as per 'count(*)' column * by a SELECT statement in the 'data' parameter * * @param data Output parameter to update with number of rows * @param nCols The number of columns or the row * @param colValues The column values * @param colNames The column names * @return 0 on success, 1 otherwise */ int countCallback(void *data, int nCols, char **colValues, char **colNames) { int *nRows = (int *)data; // Return the value of the first column: the count(*) *nRows = atoi(colValues[0]); // Set OK return 0; } /** * This SQLIte3 query rowid callback just returns the rowid * by a SELECT statement in the 'data' parameter * * @param data Output parameter to update with rowid * @param nCols The number of columns or the row * @param colValues The column values * @param colNames The column names * @return 0 on success, 1 otherwise */ int rowidCallback(void *data, int nCols, char **colValues, char **colNames) { unsigned long *rowid = (unsigned long *)data; // Return the value of the first column: the count(*) if (colValues[0]) *rowid = strtoul(colValues[0], NULL, 10); else *rowid = 0; // Set OK return 0; } #ifndef SQLITE_SPLIT_READINGS /** * Perform a query against a common table * */ bool Connection::retrieve(const string& schema, const string& table, const string& condition, string& resultSet) { // Default template parameter uses UTF8 and MemoryPoolAllocator. 
Document document; SQLBuffer sql; // Extra constraints to add to where clause SQLBuffer jsonConstraints; vector<string> asset_codes; if (!m_schemaManager->exists(dbHandle, schema)) { raiseError("retrieve", "Schema %s does not exist, unable to retrieve from table %s", schema.c_str(), table.c_str()); return false; } try { if (dbHandle == NULL) { raiseError("retrieve", "No SQLite 3 db connection available"); return false; } if (condition.empty()) { sql.append("SELECT * FROM "); sql.append(schema); sql.append('.'); sql.append(table); } else { if (document.Parse(condition.c_str()).HasParseError()) { raiseError("retrieve", "Failed to parse JSON payload"); return false; } if (document.HasMember("aggregate")) { sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } if (!jsonAggregates(document, document["aggregate"], sql, jsonConstraints, false)) { return false; } sql.append(" FROM "); sql.append(schema); sql.append('.'); } else if (document.HasMember("join")) { sql.append("SELECT "); selectColumns(document, sql, 0); } else if (document.HasMember("return")) { int col = 0; Value& columns = document["return"]; if (! columns.IsArray()) { raiseError("retrieve", "The property return must be an array"); return false; } sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr) { if (col) sql.append(", "); if (!itr->IsObject()) // Simple column name { sql.append(itr->GetString()); } else { if (itr->HasMember("column")) { if (! (*itr)["column"].IsString()) { raiseError("rerieve", "column must be a string"); return false; } if (itr->HasMember("format")) { if (! (*itr)["format"].IsString()) { raiseError("rerieve", "format must be a string"); return false; } // SQLite 3 date format. 
string new_format; applyColumnDateFormat((*itr)["format"].GetString(), (*itr)["column"].GetString(), new_format, true); // Add the formatted column or use it as is sql.append(new_format); } else if (itr->HasMember("timezone")) { if (! (*itr)["timezone"].IsString()) { raiseError("rerieve", "timezone must be a string"); return false; } // SQLite3 doesnt support time zone formatting if (strcasecmp((*itr)["timezone"].GetString(), "utc") != 0) { raiseError("retrieve", "SQLite3 plugin does not support timezones in qeueries"); return false; } else { sql.append("strftime('" F_DATEH24_MS "', "); sql.append((*itr)["column"].GetString()); sql.append(", 'utc')"); } } else { sql.append((*itr)["column"].GetString()); } sql.append(' '); } else if (itr->HasMember("json")) { const Value& json = (*itr)["json"]; if (! returnJson(json, sql, jsonConstraints)) return false; } else { raiseError("retrieve", "return object must have either a column or json property"); return false; } if (itr->HasMember("alias")) { sql.append(" AS \""); sql.append((*itr)["alias"].GetString()); sql.append('"'); } } col++; } sql.append(" FROM "); sql.append(schema); sql.append('.'); } else { sql.append("SELECT "); if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } sql.append(" * FROM "); sql.append(schema); sql.append('.'); } if (document.HasMember("join")) { sql.append(" FROM "); sql.append(schema); sql.append('.'); sql.append(table); sql.append(" t0"); appendTables(schema, document, sql, 1); } else { sql.append(table); } if (document.HasMember("where")) { sql.append(" WHERE "); if (document.HasMember("join")) { if (!jsonWhereClause(document["where"], sql, asset_codes, false, "t0.")) { return false; } // Now and the join condition itself string col0, col1; const Value& join = document["join"]; if (join.HasMember("on") && join["on"].IsString()) { col0 = join["on"].GetString(); } else { raiseError("rerieve", "Missing on item"); return false; } if 
(join.HasMember("table")) { const Value& table = join["table"]; if (table.HasMember("column") && table["column"].IsString()) { col1 = table["column"].GetString(); } else { raiseError("QueryTable", "Missing column in join table"); return false; } } sql.append(" AND t0."); sql.append(col0); sql.append(" = t1."); sql.append(col1); sql.append(" "); if (join.HasMember("query") && join["query"].IsObject()) { sql.append("AND "); const Value& query = join["query"]; processJoinQueryWhereClause(query, sql, asset_codes, 1); } } else if (document.HasMember("where")) { if (!jsonWhereClause(document["where"], sql, asset_codes, false)) { raiseError("retrieve", "Failed to add where clause"); return false; } } else { raiseError("retrieve", "JSON does not contain where clause"); return false; } if (! jsonConstraints.isEmpty()) { sql.append(" AND "); const char *jsonBuf = jsonConstraints.coalesce(); sql.append(jsonBuf); delete[] jsonBuf; } } if (!jsonModifiers(document, sql, false)) { return false; } } sql.append(';'); const char *query = sql.coalesce(); char *zErrMsg = NULL; int rc; sqlite3_stmt *stmt; logSQL("CommonRetrive", query); // Prepare the SQL statement and get the result set rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); if (rc != SQLITE_OK) { raiseError("retrieve", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); delete[] query; return false; } // Call result set mapping rc = mapResultSet(stmt, resultSet); // Delete result set sqlite3_finalize(stmt); // Check result set mapping errors if (rc != SQLITE_DONE) { raiseError("retrieve", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); delete[] query; // Failure return false; } // Release memory for 'query' var delete[] query; // Success return true; } catch (exception e) { raiseError("retrieve", "Internal error: %s", e.what()); } return false; } #endif #ifndef SQLITE_SPLIT_READINGS /** * Insert data into a table */ int Connection::insert(const 
std::string& schema, const std::string& table, const std::string& data) { Document document; ostringstream convert; sqlite3_stmt *stmt; int rc; std::size_t arr = data.find("inserts"); if (!m_schemaManager->exists(dbHandle, schema)) { raiseError("insert", "Schema %s does not exist, unable to insert into table %s", schema.c_str(), table.c_str()); return false; } // Check first the 'inserts' property in JSON data bool stdInsert = (arr == std::string::npos || arr > 8); // If input data is not an array of iserts // create an array with one element if (stdInsert) { convert << "{ \"inserts\" : [ "; convert << data; convert << " ] }"; } if (document.Parse(stdInsert ? convert.str().c_str() : data.c_str()).HasParseError()) { raiseError("insert", "Failed to parse JSON payload\n"); return -1; } // Get the array with row(s) Value &inserts = document["inserts"]; if (!inserts.IsArray()) { raiseError("insert", "Payload is missing the inserts array"); return -1; } // Number of inserts int ins = 0; int failedInsertCount = 0; // Generate sql query for prepared statement for (Value::ConstValueIterator iter = inserts.Begin(); iter != inserts.End(); ++iter) { if (!iter->IsObject()) { raiseError("insert", "Each entry in the insert array must be an object"); return -1; } { int col = 0; SQLBuffer sql; SQLBuffer values; sql.append("INSERT INTO " + schema + "." 
+ table + " ("); for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); itr != (*iter).MemberEnd(); ++itr) { // Append column name if (col) { sql.append(", "); } sql.append(itr->name.GetString()); col++; } sql.append(") VALUES ("); for ( auto i = 0 ; i < col; i++ ) { if (i) { sql.append(","); } sql.append("?"); } sql.append(");"); const char *query = sql.coalesce(); rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); if (rc != SQLITE_OK) { raiseError("insert", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); return -1; } // Bind columns with prepared sql query int columID = 1; for (Value::ConstMemberIterator itr = (*iter).MemberBegin(); itr != (*iter).MemberEnd(); ++itr) { if (itr->value.IsString()) { const char *str = itr->value.GetString(); if (strcmp(str, "now()") == 0) { sqlite3_bind_text(stmt, columID, SQLITE3_NOW, -1, SQLITE_TRANSIENT); } else { sqlite3_bind_text(stmt, columID, str, -1, SQLITE_TRANSIENT); } } else if (itr->value.IsDouble()) { sqlite3_bind_double(stmt, columID,itr->value.GetDouble()); } else if (itr->value.IsInt64()) { sqlite3_bind_int(stmt, columID,(long)itr->value.GetInt64()); } else if (itr->value.IsInt()) { sqlite3_bind_int(stmt, columID,itr->value.GetInt()); } else if (itr->value.IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); itr->value.Accept(writer); sqlite3_bind_text(stmt, columID, buffer.GetString(), -1, SQLITE_TRANSIENT); } columID++ ; } if (sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL) != SQLITE_OK) { raiseError("insert", sqlite3_errmsg(dbHandle)); return -1; } m_writeAccessOngoing.fetch_add(1); int sqlite3_resut = SQLstep(stmt); m_writeAccessOngoing.fetch_sub(1); if (sqlite3_resut == SQLITE_DONE) { sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); } else { failedInsertCount++; raiseError("insert", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", sqlite3_expanded_sql(stmt)); // transaction is still open, do rollback if 
(sqlite3_get_autocommit(dbHandle) == 0) { rc = sqlite3_exec(dbHandle,"ROLLBACK TRANSACTION;",NULL,NULL,NULL); if (rc != SQLITE_OK) { raiseError("insert rollback", sqlite3_errmsg(dbHandle)); } } } if (sqlite3_exec(dbHandle, "COMMIT TRANSACTION", NULL, NULL, NULL) != SQLITE_OK) { raiseError("insert", sqlite3_errmsg(dbHandle)); return -1; } delete[] query; } // Increment row count ins++; } sqlite3_finalize(stmt); if (m_writeAccessOngoing == 0) db_cv.notify_all(); if (failedInsertCount) { char buf[100]; snprintf(buf, sizeof(buf), "Not all inserts into table '%s.%s' within transaction succeeded", schema.c_str(), table.c_str()); raiseError("insert", buf); } return (!failedInsertCount ? ins : -1); } #endif #ifndef SQLITE_SPLIT_READINGS /** * Perform an update against a common table * This routine uses SQLite 3 JSON1 extension: * * json_set(field, '$.key.value', the_value) * */ int Connection::update(const string& schema, const string& table, const string& payload) { // Default template parameter uses UTF8 and MemoryPoolAllocator. 
Document document; SQLBuffer sql; bool allowZero = false; vector<string> asset_codes; int row = 0; ostringstream convert; if (!m_schemaManager->exists(dbHandle, schema)) { raiseError("update", "Schema %s does not exist, unable to update table %s", schema.c_str(), table.c_str()); return false; } std::size_t arr = payload.find("updates"); bool changeReqd = (arr == std::string::npos || arr > 8); if (changeReqd) { convert << "{ \"updates\" : [ "; convert << payload; convert << " ] }"; } if (document.Parse(changeReqd?convert.str().c_str():payload.c_str()).HasParseError()) { raiseError("update", "Failed to parse JSON payload"); return -1; } else { Value &updates = document["updates"]; if (!updates.IsArray()) { raiseError("update", "Payload is missing the updates array"); return -1; } sql.append("BEGIN TRANSACTION;"); int i=0; for (Value::ConstValueIterator iter = updates.Begin(); iter != updates.End(); ++iter,++i) { if (!iter->IsObject()) { raiseError("update", "Each entry in the update array must be an object"); return -1; } sql.append("UPDATE "); sql.append(schema); sql.append('.'); sql.append(table); sql.append(" SET "); int col = 0; if ((*iter).HasMember("values")) { const Value& values = (*iter)["values"]; for (Value::ConstMemberIterator itr = values.MemberBegin(); itr != values.MemberEnd(); ++itr) { if (col != 0) { sql.append( ", "); } sql.append(itr->name.GetString()); sql.append(" = "); if (itr->value.IsString()) { const char *str = itr->value.GetString(); if (strcmp(str, "now()") == 0) { sql.append(SQLITE3_NOW); } else { sql.append('\''); sql.append(escape(str)); sql.append('\''); } } else if (itr->value.IsDouble()) sql.append(itr->value.GetDouble()); else if (itr->value.IsUint64()) sql.append((unsigned long)itr->value.GetUint64()); else if (itr->value.IsInt64()) sql.append((long)itr->value.GetInt64()); else if (itr->value.IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); itr->value.Accept(writer); sql.append('\''); 
sql.append(escape(buffer.GetString())); sql.append('\''); } // Handle JSON value null: "item" : null else if (itr->value.IsNull()) { sql.append("NULL"); } col++; } } if ((*iter).HasMember("expressions")) { const Value& exprs = (*iter)["expressions"]; if (!exprs.IsArray()) { raiseError("update", "The property exressions must be an array"); return -1; } for (Value::ConstValueIterator itr = exprs.Begin(); itr != exprs.End(); ++itr) { if (col != 0) { sql.append( ", "); } if (!itr->IsObject()) { raiseError("update", "expressions must be an array of objects"); return -1; } if (!itr->HasMember("column")) { raiseError("update", "Missing column property in expressions array item"); return -1; } if (!itr->HasMember("operator")) { raiseError("update", "Missing operator property in expressions array item"); return -1; } if (!itr->HasMember("value")) { raiseError("update", "Missing value property in expressions array item"); return -1; } sql.append((*itr)["column"].GetString()); sql.append(" = "); sql.append((*itr)["column"].GetString()); sql.append(' '); sql.append((*itr)["operator"].GetString()); sql.append(' '); const Value& value = (*itr)["value"]; if (value.IsString()) { const char *str = value.GetString(); if (strcmp(str, "now()") == 0) { sql.append(SQLITE3_NOW); } else { sql.append('\''); sql.append(str); sql.append('\''); } } else if (value.IsDouble()) sql.append(value.GetDouble()); else if (value.IsInt64()) sql.append((long)value.GetInt64()); else if (value.IsInt()) sql.append(value.GetInt()); else if (value.IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); value.Accept(writer); sql.append('\''); sql.append(buffer.GetString()); sql.append('\''); } col++; } } if ((*iter).HasMember("json_properties")) { const Value& exprs = (*iter)["json_properties"]; if (!exprs.IsArray()) { raiseError("update", "The property json_properties must be an array"); return -1; } for (Value::ConstValueIterator itr = exprs.Begin(); itr != exprs.End(); ++itr) { if (col != 
0) { sql.append( ", "); } if (!itr->IsObject()) { raiseError("update", "json_properties must be an array of objects"); return -1; } if (!itr->HasMember("column")) { raiseError("update", "Missing column property in json_properties array item"); return -1; } if (!itr->HasMember("path")) { raiseError("update", "Missing path property in json_properties array item"); return -1; } if (!itr->HasMember("value")) { raiseError("update", "Missing value property in json_properties array item"); return -1; } sql.append((*itr)["column"].GetString()); // SQLite 3 JSON1 extension: json_set // json_set(field, '$.key.value', the_value) sql.append(" = json_set("); sql.append((*itr)["column"].GetString()); sql.append(", '$."); const Value& path = (*itr)["path"]; if (!path.IsArray()) { raiseError("update", "The property path must be an array"); return -1; } int pathElement = 0; for (Value::ConstValueIterator itr2 = path.Begin(); itr2 != path.End(); ++itr2) { if (pathElement > 0) { sql.append('.'); } if (itr2->IsString()) { sql.append(itr2->GetString()); } else { raiseError("update", "The elements of path must all be strings"); return -1; } pathElement++; } sql.append("', "); const Value& value = (*itr)["value"]; if (value.IsString()) { const char *str = value.GetString(); if (strcmp(str, "now()") == 0) { sql.append(SQLITE3_NOW); } else { sql.append('\''); sql.append(escape(str)); sql.append('\''); } } else if (value.IsDouble()) { sql.append(value.GetDouble()); } else if (value.IsInt64()) { sql.append((long)value.GetInt64()); } else if (value.IsInt()) { sql.append(value.GetInt()); } else if (value.IsObject()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); value.Accept(writer); sql.append('\''); sql.append(escape(buffer.GetString())); sql.append('\''); } sql.append(")"); col++; } } if (iter->HasMember("modifier") && (*iter)["modifier"].IsArray()) { const Value& modifier = (*iter)["modifier"]; for (Value::ConstValueIterator modifiers = modifier.Begin(); modifiers != 
modifier.End(); ++modifiers) { if (modifiers->IsString()) { string mod = modifiers->GetString(); if (mod.compare("allowzero") == 0) { allowZero = true; } } } } if (col == 0) { raiseError("update", "Missing values or expressions object in payload"); return -1; } if ((*iter).HasMember("condition")) { sql.append(" WHERE "); if (!jsonWhereClause((*iter)["condition"], sql, asset_codes)) { return false; } } else if ((*iter).HasMember("where")) { sql.append(" WHERE "); if (!jsonWhereClause((*iter)["where"], sql, asset_codes)) { return false; } } sql.append(';'); row++; } } sql.append("COMMIT TRANSACTION;"); const char *query = sql.coalesce(); logSQL("CommonUpdate", query); char *zErrMsg = NULL; int rc; // Exec the UPDATE statement: no callback, no result set m_writeAccessOngoing.fetch_add(1); rc = SQLexec(dbHandle, table, query, NULL, NULL, &zErrMsg); m_writeAccessOngoing.fetch_sub(1); if (m_writeAccessOngoing == 0) db_cv.notify_all(); // Check result code if (rc != SQLITE_OK) { raiseError("update", zErrMsg); sqlite3_free(zErrMsg); if (sqlite3_get_autocommit(dbHandle)==0) // transaction is still open, do rollback { rc=SQLexec(dbHandle, table, "ROLLBACK TRANSACTION;", NULL, NULL, &zErrMsg); if (rc != SQLITE_OK) { raiseError("rollback", zErrMsg); sqlite3_free(zErrMsg); } } Logger::getLogger()->error("SQL statement: %s", query); // Release memory for 'query' var delete[] query; return -1; } else { // Release memory for 'query' var delete[] query; int update = sqlite3_changes(dbHandle); int return_value=0; if (update == 0 && allowZero == false) { char buf[100]; snprintf(buf, sizeof(buf), "Not all updates within transaction '%s.%s' succeeded", schema.c_str(), table.c_str()); raiseError("update", buf); return_value = -1; } else { return_value = (row == 1 ? 
update : row); } // Returns the number of rows affected, cases : // // 1) update == 0, no update, returns -1 // 2) single command SQL that could affects multiple rows, returns 'update' // 3) multiple SQL commands packed and executed in one SQLexec, returns 'row' return (return_value); } // Return failure return -1; } #endif /** * Format a date to a fixed format with milliseconds, microseconds and * timezone expressed, examples : * * case - formatted |2019-01-01 10:01:01.000000+00:00| date |2019-01-01 10:01:01| * case - formatted |2019-02-01 10:02:01.000000+00:00| date |2019-02-01 10:02:01.0| * case - formatted |2019-02-02 10:02:02.841000+00:00| date |2019-02-02 10:02:02.841| * case - formatted |2019-02-03 10:02:03.123456+00:00| date |2019-02-03 10:02:03.123456| * case - formatted |2019-03-01 10:03:01.100000+00:00| date |2019-03-01 10:03:01.1+00:00| * case - formatted |2019-03-02 10:03:02.123000+00:00| date |2019-03-02 10:03:02.123+00:00| * case - formatted |2019-03-03 10:03:03.123456+00:00| date |2019-03-03 10:03:03.123456+00:00| * case - formatted |2019-03-04 10:03:04.123456+01:00| date |2019-03-04 10:03:04.123456+01:00| * case - formatted |2019-03-05 10:03:05.123456-01:00| date |2019-03-05 10:03:05.123456-01:00| * case - formatted |2019-03-04 10:03:04.123456+02:30| date |2019-03-04 10:03:04.123456+02:30| * case - formatted |2019-03-05 10:03:05.123456-02:30| date |2019-03-05 10:03:05.123456-02:30| * * @param out false if the date is invalid * */ bool Connection::formatDate(char *formatted_date, size_t buffer_size, const char *date) { struct timeval tv = {0}; struct tm tm = {0}; char *valid_date = nullptr; enum codeOptimization{CO_NONE, CO_01, CO_02, CO_03}; codeOptimization opt; int len; // Code optimization for the cases: // // 2019-03-03 10:03:03.123456+00:00 // 2019-02-02 10:02:02.841 // 2019-01-01 10:01:01 len = strlen(date); if (len == 32) { if ( date[19] == '.' 
&& (date[26] == '-' || date[26] == '+')&& date[29] == ':' ) { // Case - 2019-03-03 10:03:03.123456+00:00 strcpy(formatted_date, date); opt = CO_01; } else opt = CO_NONE; } else if (len == 23) { if ( date[19] == '.') { // Case - 2019-02-02 10:02:02.841 strcpy(formatted_date, date); strcat(formatted_date, "000+00:00"); opt = CO_02; } else opt = CO_NONE; } else if (len == 19) { // Case - 2019-01-01 10:01:01 strcpy(formatted_date, date); strcat(formatted_date, ".000000+00:00"); opt = CO_03; } else { opt = CO_NONE; } if (opt != CO_NONE) { return (true); } // Extract up to seconds memset(&tm, 0, sizeof(tm)); valid_date = strptime(date, F_DATEH24_SEC, &tm); if (! valid_date) { return (false); } strftime (formatted_date, buffer_size, F_DATEH24_SEC, &tm); // Work out the microseconds from the fractional part of the seconds char fractional[10] = {0}; sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%[0-9]*", fractional); // Truncate to max 6 digits fractional[6] = 0; int multiplier = 6 - (int)strlen(fractional); if (multiplier < 0) multiplier = 0; while (multiplier--) strcat(fractional, "0"); strcat(formatted_date ,"."); strcat(formatted_date ,fractional); // Handles timezone char timezone_hour[5] = {0}; char timezone_min[5] = {0}; char sign[2] = {0}; sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%*d-%2[0-9]:%2[0-9]", timezone_hour, timezone_min); if (timezone_hour[0] != 0) { strcat(sign, "-"); } else { memset(timezone_hour, 0, sizeof(timezone_hour)); memset(timezone_min, 0, sizeof(timezone_min)); sscanf(date, "%*d-%*d-%*d %*d:%*d:%*d.%*d+%2[0-9]:%2[0-9]", timezone_hour, timezone_min); if (timezone_hour[0] != 0) { strcat(sign, "+"); } else { // No timezone is expressed in the source date // the default UTC is added strcat(formatted_date, "+00:00"); } } if (sign[0] != 0) { if (timezone_hour[0] != 0) { strcat(formatted_date, sign); // Pad with 0 if an hour having only 1 digit was provided // +1 -> +01 if (strlen(timezone_hour) == 1) strcat(formatted_date, "0"); strcat(formatted_date, 
timezone_hour); strcat(formatted_date, ":"); } if (timezone_min[0] != 0) { strcat(formatted_date, timezone_min); // Pad with 0 if minutes having only 1 digit were provided // 3 -> 30 if (strlen(timezone_min) == 1) strcat(formatted_date, "0"); } else { // Minutes aren't expressed in the source date strcat(formatted_date, "00"); } } return (true); } /** * Process the aggregate options and return the columns to be selected */ bool Connection::jsonAggregates(const Value& payload, const Value& aggregates, SQLBuffer& sql, SQLBuffer& jsonConstraint, bool isTableReading) { if (aggregates.IsObject()) { if (! aggregates.HasMember("operation")) { raiseError("Select aggregation", "Missing property \"operation\""); return false; } if ((! aggregates.HasMember("column")) && (! aggregates.HasMember("json"))) { raiseError("Select aggregation", "Missing property \"column\" or \"json\""); return false; } sql.append(aggregates["operation"].GetString()); sql.append('('); if (aggregates.HasMember("column")) { string col = aggregates["column"].GetString(); if (col.compare("*") == 0) // Faster to count ROWID rather than * { sql.append("ROWID"); } else { // an operation different from the 'count' is requested if (isTableReading && (col.compare("user_ts") == 0) ) { sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'localtime') "); sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) "); } else { sql.append("\""); sql.append(col); sql.append("\""); } } } else if (aggregates.HasMember("json")) { const Value& json = aggregates["json"]; if (! 
json.IsObject()) { raiseError("Select aggregation", "The json property must be an object"); return false; } if (!json.HasMember("column")) { raiseError("retrieve", "The json property is missing a column property"); return false; } // Use json_extract(field, '$.key1.key2') AS value sql.append("json_extract("); sql.append(json["column"].GetString()); sql.append(", '$."); if (!json.HasMember("properties")) { raiseError("retrieve", "The json property is missing a properties property"); return false; } const Value& jsonFields = json["properties"]; if (jsonFields.IsArray()) { if (! jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } // JSON1 SQLite3 extension 'json_type' object check: // json_type(field, '$.key1.key2') IS NOT NULL // Build the Json keys NULL check jsonConstraint.append("json_type("); jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(", '$."); int field = 0; string prev; for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr) { if (field) { sql.append("."); } if (prev.length() > 0) { // Append Json field for NULL check jsonConstraint.append(prev); jsonConstraint.append("."); } prev = itr->GetString(); field++; // Append Json field for query sql.append(itr->GetString()); } // Add last Json key jsonConstraint.append(prev); // Add condition for all json keys not null jsonConstraint.append("') IS NOT NULL"); } else { // Append Json field for query sql.append(jsonFields.GetString()); if (! 
jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } // JSON1 SQLite3 extension 'json_type' object check: // json_type(field, '$.key1.key2') IS NOT NULL // Build the Json key NULL check jsonConstraint.append("json_type("); jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(", '$."); jsonConstraint.append(jsonFields.GetString()); // Add condition for json key not null jsonConstraint.append("') IS NOT NULL"); } sql.append("')"); } sql.append(") AS \""); if (aggregates.HasMember("alias")) { sql.append(aggregates["alias"].GetString()); } else { sql.append(aggregates["operation"].GetString()); sql.append('_'); sql.append(aggregates["column"].GetString()); } sql.append("\""); } else if (aggregates.IsArray()) { int index = 0; for (Value::ConstValueIterator itr = aggregates.Begin(); itr != aggregates.End(); ++itr) { if (!itr->IsObject()) { raiseError("select aggregation", "Each element in the aggregate array must be an object"); return false; } if ((! itr->HasMember("column")) && (! itr->HasMember("json"))) { raiseError("Select aggregation", "Missing property \"column\""); return false; } if (! itr->HasMember("operation")) { raiseError("Select aggregation", "Missing property \"operation\""); return false; } if (index) sql.append(", "); index++; sql.append((*itr)["operation"].GetString()); sql.append('('); if (itr->HasMember("column")) { string column_name= (*itr)["column"].GetString(); if (isTableReading && (column_name.compare("user_ts") == 0) ) { sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'localtime') "); sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) "); } else { sql.append("\""); sql.append(column_name); sql.append("\""); } } else if (itr->HasMember("json")) { const Value& json = (*itr)["json"]; if (! 
json.IsObject()) { raiseError("Select aggregation", "The json property must be an object"); return false; } if (!json.HasMember("column")) { raiseError("retrieve", "The json property is missing a column property"); return false; } if (!json.HasMember("properties")) { raiseError("retrieve", "The json property is missing a properties property"); return false; } const Value& jsonFields = json["properties"]; if (! jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } // Use json_extract(field, '$.key1.key2') AS value sql.append("json_extract("); sql.append(json["column"].GetString()); sql.append(", '$."); // JSON1 SQLite3 extension 'json_type' object check: // json_type(field, '$.key1.key2') IS NOT NULL // Build the Json keys NULL check jsonConstraint.append("json_type("); jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(", '$."); if (jsonFields.IsArray()) { string prev; for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr) { if (prev.length() > 0) { jsonConstraint.append(prev); jsonConstraint.append('.'); sql.append('.'); } // Append Json field for query sql.append(itr->GetString()); prev = itr->GetString(); } // Add last Json key jsonConstraint.append(prev); // Add condition for json key not null jsonConstraint.append("') IS NOT NULL"); } else { // Append Json field for query sql.append(jsonFields.GetString()); // JSON1 SQLite3 extension 'json_type' object check: // json_type(field, '$.key1.key2') IS NOT NULL // Build the Json key NULL check jsonConstraint.append(jsonFields.GetString()); // Add condition for json key not null jsonConstraint.append("') IS NOT NULL"); } sql.append("')"); } sql.append(") AS \""); if (itr->HasMember("alias")) { sql.append((*itr)["alias"].GetString()); } else { sql.append((*itr)["operation"].GetString()); sql.append('_'); sql.append((*itr)["column"].GetString()); } sql.append("\""); } } if (payload.HasMember("group")) { sql.append(", "); if (payload["group"].IsObject()) { 
const Value& grp = payload["group"]; if (grp.HasMember("format")) { // SQLite 3 date format. string new_format; if (isTableReading) { applyColumnDateFormatLocaltime(grp["format"].GetString(), grp["column"].GetString(), new_format); } else { applyColumnDateFormat(grp["format"].GetString(), grp["column"].GetString(), new_format); } // Add the formatted column or use it as is sql.append(new_format); } else { sql.append(grp["column"].GetString()); } if (grp.HasMember("alias")) { sql.append(" AS \""); sql.append(grp["alias"].GetString()); sql.append("\""); } else { sql.append(" AS \""); sql.append(grp["column"].GetString()); sql.append("\""); } } else { sql.append(payload["group"].GetString()); } } if (payload.HasMember("timebucket")) { const Value& tb = payload["timebucket"]; if (! tb.IsObject()) { raiseError("Select data", "The \"timebucket\" property must be an object"); return false; } if (! tb.HasMember("timestamp")) { raiseError("Select data", "The \"timebucket\" object must have a timestamp property"); return false; } if (tb.HasMember("format")) { // SQLite 3 date format is limited. string new_format; if (applyDateFormat(tb["format"].GetString(), new_format)) { sql.append(", "); // Add the formatted column sql.append(new_format); if (tb.HasMember("size")) { // Use Unix epoch, without microseconds sql.append(tb["size"].GetString()); sql.append(" * round("); sql.append("strftime('%s', "); sql.append(tb["timestamp"].GetString()); sql.append(") / "); sql.append(tb["size"].GetString()); sql.append(", 6)"); } else { sql.append(tb["timestamp"].GetString()); } sql.append(", 'unixepoch')"); } else { /** * No date format found: we should return an error. 
* Note: currently if input Json payload has no 'result' member * raiseError() results in no data being sent to the client * We use Unix epoch without microseconds */ sql.append(", datetime("); if (tb.HasMember("size")) { sql.append(tb["size"].GetString()); sql.append(" * round("); } // Use Unix epoch, without microseconds sql.append("strftime('%s', "); sql.append(tb["timestamp"].GetString()); if (tb.HasMember("size")) { sql.append(") / "); sql.append(tb["size"].GetString()); sql.append(", 6)"); } else { sql.append(")"); } sql.append(", 'unixepoch')"); } } else { sql.append(", datetime("); if (tb.HasMember("size")) { sql.append(tb["size"].GetString()); sql.append(" * round("); } /* * Default format when no format is specified: * - we use Unix time without milliseconds. */ sql.append("strftime('%s', "); sql.append(tb["timestamp"].GetString()); if (tb.HasMember("size")) { sql.append(") / "); sql.append(tb["size"].GetString()); sql.append(", 6)"); } else { sql.append(")"); } sql.append(", 'unixepoch')"); } sql.append(" AS \""); if (tb.HasMember("alias")) { sql.append(tb["alias"].GetString()); } else { sql.append("timestamp"); } sql.append('"'); } return true; } /** * Process the modifiers for limit, skip, sort and group */ bool Connection::jsonModifiers(const Value& payload, SQLBuffer& sql, bool isTableReading) { if (payload.HasMember("timebucket") && payload.HasMember("sort")) { raiseError("query modifiers", "Sort and timebucket modifiers can not be used in the same payload"); return false; } if (payload.HasMember("group")) { sql.append(" GROUP BY "); if (payload["group"].IsObject()) { const Value& grp = payload["group"]; if (grp.HasMember("format")) { /** * SQLite 3 date format is limited. * Handle all available formats here. 
*/ string new_format; if (isTableReading) { applyColumnDateFormatLocaltime(grp["format"].GetString(), grp["column"].GetString(), new_format); } else { applyColumnDateFormat(grp["format"].GetString(), grp["column"].GetString(), new_format); } // Add the formatted column or use it as is sql.append(new_format); } } else { sql.append(payload["group"].GetString()); } } if (payload.HasMember("sort")) { sql.append(" ORDER BY "); const Value& sortBy = payload["sort"]; if (sortBy.IsObject()) { if (! sortBy.HasMember("column")) { raiseError("Select sort", "Missing property \"column\""); return false; } sql.append(sortBy["column"].GetString()); sql.append(' '); if (! sortBy.HasMember("direction")) { sql.append("ASC"); } else { sql.append(sortBy["direction"].GetString()); } } else if (sortBy.IsArray()) { int index = 0; for (Value::ConstValueIterator itr = sortBy.Begin(); itr != sortBy.End(); ++itr) { if (!itr->IsObject()) { raiseError("select sort", "Each element in the sort array must be an object"); return false; } if (! itr->HasMember("column")) { raiseError("Select sort", "Missing property \"column\""); return false; } if (index) { sql.append(", "); } index++; sql.append((*itr)["column"].GetString()); sql.append(' '); if (! itr->HasMember("direction")) { sql.append("ASC"); } else { sql.append((*itr)["direction"].GetString()); } } } } if (payload.HasMember("timebucket")) { const Value& tb = payload["timebucket"]; if (! tb.IsObject()) { raiseError("Select data", "The \"timebucket\" property must be an object"); return false; } if (! 
tb.HasMember("timestamp")) { raiseError("Select data", "The \"timebucket\" object must have a timestamp property"); return false; } if (payload.HasMember("group")) { sql.append(", "); } else { sql.append(" GROUP BY "); } // Divide by "size" if (tb.HasMember("size")) { // Use Unix epoch without milliseconds sql.append("round(strftime('%s', "); sql.append(tb["timestamp"].GetString()); sql.append(") / "); sql.append(tb["size"].GetString()); sql.append(", 6)"); } else { // Use Unix epoch sql.append("strftime('%s', "); sql.append(tb["timestamp"].GetString()); sql.append(")"); } sql.append(" ORDER BY "); // Use Unix epoch without milliseconds sql.append("strftime('%s', "); sql.append(tb["timestamp"].GetString()); sql.append(")"); sql.append(" DESC"); } if (payload.HasMember("limit")) { if (!payload["limit"].IsInt()) { raiseError("limit", "Limit must be specfied as an integer"); return false; } sql.append(" LIMIT "); try { sql.append(payload["limit"].GetInt()); } catch (exception e) { raiseError("limit", "Bad value for limit parameter: %s", e.what()); return false; } } // OFFSET must go after LIMIT if (payload.HasMember("skip")) { // Add no limits if (!payload.HasMember("limit")) { sql.append(" LIMIT -1"); } if (!payload["skip"].IsInt()) { raiseError("skip", "Skip must be specfied as an integer"); return false; } sql.append(" OFFSET "); sql.append(payload["skip"].GetInt()); } return true; } /** * Convert a JSON where clause into a SQLite3 where clause * */ bool Connection::jsonWhereClause(const Value& whereClause, SQLBuffer& sql, std::vector<std::string> &asset_codes, bool convertLocaltime, string prefix) { if (!whereClause.IsObject()) { raiseError("where clause", "The \"where\" property must be a JSON object"); return false; } if (!whereClause.HasMember("column")) { raiseError("where clause", "The \"where\" object is missing a \"column\" property"); return false; } if (!whereClause.HasMember("condition")) { raiseError("where clause", "The \"where\" object is missing a 
\"condition\" property"); return false; } string column = whereClause["column"].GetString(); if (!prefix.empty()) { sql.append(prefix); } sql.append(column); sql.append(' '); string cond = whereClause["condition"].GetString(); if (cond.compare("isnull") == 0) { sql.append("isnull "); } else if (cond.compare("notnull") == 0) { sql.append("notnull "); } else { if (!whereClause.HasMember("value")) { raiseError("where clause", "The \"where\" object is missing a \"value\" property"); return false; } if (!cond.compare("older")) { if (!whereClause["value"].IsInt()) { raiseError("where clause", "The \"value\" of an \"older\" condition must be an integer"); return false; } sql.append("< datetime('now', '-"); sql.append(whereClause["value"].GetInt()); if (convertLocaltime) sql.append(" seconds', 'localtime')"); // Get value in localtime else sql.append(" seconds')"); // Get value in UTC by asking for no timezone } else if (!cond.compare("newer")) { if (!whereClause["value"].IsInt()) { raiseError("where clause", "The \"value\" of an \"newer\" condition must be an integer"); return false; } sql.append("> datetime('now', '-"); sql.append(whereClause["value"].GetInt()); if (convertLocaltime) sql.append(" seconds', 'localtime')"); // Get value in localtime else sql.append(" seconds')"); // Get value in UTC by asking for no timezone } else if (!cond.compare("in") || !cond.compare("not in")) { // Check we have a non empty array if (whereClause["value"].IsArray() && whereClause["value"].Size()) { sql.append(cond); sql.append(" ( "); int field = 0; for (Value::ConstValueIterator itr = whereClause["value"].Begin(); itr != whereClause["value"].End(); ++itr) { if (field) { sql.append(", "); } field++; if (itr->IsNumber()) { if (itr->IsInt()) { sql.append(itr->GetInt()); } else if (itr->IsInt64()) { sql.append((long)itr->GetInt64()); } else { sql.append(itr->GetDouble()); } } else if (itr->IsString()) { sql.append('\''); sql.append(escape(itr->GetString())); sql.append('\''); } else { 
string message("The \"value\" of a \"" + \ cond + \ "\" condition array element must be " \ "a string, integer or double."); raiseError("where clause", message.c_str()); return false; } } sql.append(" )"); } else { string message("The \"value\" of a \"" + \ cond + "\" condition must be an array " \ "and must not be empty."); raiseError("where clause", message.c_str()); return false; } } else { sql.append(cond); sql.append(' '); if (whereClause["value"].IsInt()) { sql.append(whereClause["value"].GetInt()); } else if (whereClause["value"].IsString()) { string value = whereClause["value"].GetString(); sql.append('\''); sql.append(escape(value)); sql.append('\''); // Identify a specific operation to restrinct the tables involved if (column.compare("asset_code") == 0) if ( cond.compare("=") == 0) asset_codes.push_back(value); } } } if (whereClause.HasMember("and")) { sql.append(" AND "); if (!jsonWhereClause(whereClause["and"], sql, asset_codes, convertLocaltime, prefix)) { return false; } } if (whereClause.HasMember("or")) { sql.append(" OR "); if (!jsonWhereClause(whereClause["or"], sql, asset_codes, convertLocaltime, prefix)) { return false; } } return true; } /** * This routine uses SQLit3 JSON1 extension functions */ bool Connection::returnJson(const Value& json, SQLBuffer& sql, SQLBuffer& jsonConstraint) { if (! json.IsObject()) { raiseError("retrieve", "The json property must be an object"); return false; } if (!json.HasMember("column")) { raiseError("retrieve", "The json property is missing a column property"); return false; } // Call JSON1 SQLite3 extension routine 'json_extract' // json_extract(field, '$.key1.key2') AS value sql.append("json_extract("); sql.append(json["column"].GetString()); sql.append(", '$."); if (!json.HasMember("properties")) { raiseError("retrieve", "The json property is missing a properties property"); return false; } const Value& jsonFields = json["properties"]; if (jsonFields.IsArray()) { if (! 
jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } // JSON1 SQLite3 extension 'json_type' object check: // json_type(field, '$.key1.key2') IS NOT NULL // Build the Json keys NULL check jsonConstraint.append("json_type("); jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(", '$."); int field = 0; string prev; for (Value::ConstValueIterator itr = jsonFields.Begin(); itr != jsonFields.End(); ++itr) { if (field) { sql.append("."); } if (prev.length()) { jsonConstraint.append(prev); jsonConstraint.append("."); } field++; // Append Json field for query sql.append(itr->GetString()); prev = itr->GetString(); } // Add last Json key jsonConstraint.append(prev); // Add condition for all json keys not null jsonConstraint.append("') IS NOT NULL"); } else { // Append Json field for query sql.append(jsonFields.GetString()); if (! jsonConstraint.isEmpty()) { jsonConstraint.append(" AND "); } // JSON1 SQLite3 extension 'json_type' object check: // json_type(field, '$.key1.key2') IS NOT NULL // Build the Json key NULL check jsonConstraint.append("json_type("); jsonConstraint.append(json["column"].GetString()); jsonConstraint.append(", '$."); jsonConstraint.append(jsonFields.GetString()); // Add condition for json key not null jsonConstraint.append("') IS NOT NULL"); } sql.append("') "); return true; } /** * Remove whitespace at both ends of a string */ char *Connection::trim(char *str) { char *ptr; while (*str && *str == ' ') str++; ptr = str + strlen(str) - 1; while (ptr > str && *ptr == ' ') { *ptr = 0; ptr--; } return str; } /** * Raise an error to return from the plugin */ void Connection::raiseError(const char *operation, const char *reason, ...) 
{ ConnectionManager *manager = ConnectionManager::getInstance(); char tmpbuf[512]; va_list ap; va_start(ap, reason); vsnprintf(tmpbuf, sizeof(tmpbuf), reason, ap); va_end(ap); Logger::getLogger()->error("%s storage plugin raising error: %s", PLUGIN_LOG_NAME, tmpbuf); manager->setError(operation, tmpbuf, false); } /** * Return the sie of a given table in bytes */ long Connection::tableSize(const string& table) { SQLBuffer buf; raiseError("tableSize", "Not available in SQLite3 storage plugin"); return -1; } /** * String escape routine */ const string Connection::escape(const string& str) { char *buffer; const char *p1; char *p2; string newString; if (str.find_first_of('\'') == string::npos) { return str; } buffer = (char *)malloc(str.length() * 2); p1 = str.c_str(); p2 = buffer; while (*p1) { if (*p1 == '\'') { *p2++ = '\''; *p2++ = '\''; p1++; } else { *p2++ = *p1++; } } *p2 = 0; newString = string(buffer); free(buffer); return newString; } /** * Optionally log SQL statement execution * * @param tag A string tag that says why the SQL is being executed * @param stmt The SQL statement itself */ void Connection::logSQL(const char *tag, const char *stmt) { if (m_logSQL) { Logger::getLogger()->info("%s, %s: %s", PLUGIN_LOG_NAME, tag, stmt); } } /** * SQLITE wrapper to rety statements when the database is locked * * @param db The open SQLite database * @param sql The SQL to execute * @param callback Callback function * @param cbArg Callback 1st argument * @param errmsg Locaiton to write error message */ int Connection::SQLexec(sqlite3 *db, const string& table, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg) { int retries = 0, rc; int interval; do { #if DO_PROFILE ProfileItem *prof = new ProfileItem(sql); #endif rc = sqlite3_exec(db, sql, callback, cbArg, errmsg); #if DO_PROFILE prof->complete(); profiler.insert(prof); #endif retries++; if (rc != SQLITE_OK) { #if DO_PROFILE_RETRIES m_qMutex.lock(); m_waiting.fetch_add(1); if (maxQueue 
< m_waiting) maxQueue = m_waiting; m_qMutex.unlock(); #endif interval = (1 * RETRY_BACKOFF); std::this_thread::sleep_for(std::chrono::milliseconds(interval)); if (retries > 5) { Logger::getLogger()->info( "SQLexec: retry %d of %d, rc=%s, errmsg=%s, DB connection @ %p, slept for %d msecs", retries, MAX_RETRIES, (rc == SQLITE_LOCKED) ? "SQLITE_LOCKED" : "SQLITE_BUSY", sqlite3_errmsg(db), this, interval); } #if DO_PROFILE_RETRIES m_qMutex.lock(); m_waiting.fetch_sub(1); m_qMutex.unlock(); #endif if (sqlite3_get_autocommit(db)==0) // if transaction is still open, do rollback { int rc2; char *zErrMsg = NULL; rc2=SQLexec(db, table, "ROLLBACK TRANSACTION;", NULL, NULL, &zErrMsg); if (rc2 != SQLITE_OK) { raiseError("rollback", zErrMsg); sqlite3_free(zErrMsg); } } } } while (retries < MAX_RETRIES && (rc != SQLITE_OK)); if (retries >1) { ostringstream threadId; threadId << std::this_thread::get_id(); Logger::getLogger()->debug("%s - Completed retries :%d: :%s:", __FUNCTION__, retries, threadId.str().c_str() ); } #if DO_PROFILE_RETRIES retryStats[retries-1]++; if (++numStatements > RETRY_REPORT_THRESHOLD - 1) { numStatements = 0; Logger *log = Logger::getLogger(); log->info("Storage layer statement retry profile"); for (int i = 0; i < MAX_RETRIES-1; i++) { log->info("%2d: %d", i, retryStats[i]); retryStats[i] = 0; } log->info("Too many retries: %d", retryStats[MAX_RETRIES-1]); retryStats[MAX_RETRIES-1] = 0; log->info("Maximum retry queue length: %d", maxQueue); maxQueue = 0; } #endif if (rc == SQLITE_LOCKED) { Logger::getLogger()->error("Database still locked after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } if (rc == SQLITE_BUSY) { Logger::getLogger()->error("Database still busy after maximum retries, executing %s operation on %s", operation(sql).c_str(), table.c_str()); } return rc; } int Connection::SQLstep(sqlite3_stmt *statement) { int retries = 0, rc; int interval; do { #if DO_PROFILE ProfileItem *prof = new 
ProfileItem(sqlite3_sql(statement)); #endif rc = sqlite3_step(statement); #if DO_PROFILE prof->complete(); profiler.insert(prof); #endif retries++; if (rc == SQLITE_LOCKED || rc == SQLITE_BUSY) { interval = (retries * RETRY_BACKOFF); std::this_thread::sleep_for(std::chrono::milliseconds(interval)); if (retries > 5) { Logger::getLogger()->info("SQLstep: retry %d of %d, rc=%s, DB connection @ %p, slept for %d msecs", retries, MAX_RETRIES, (rc==SQLITE_LOCKED)?"SQLITE_LOCKED":"SQLITE_BUSY", this, interval); } } } while (retries < MAX_RETRIES && (rc == SQLITE_LOCKED || rc == SQLITE_BUSY)); #if DO_PROFILE_RETRIES retryStats[retries-1]++; if (++numStatements > 1000) { numStatements = 0; Logger *log = Logger::getLogger(); log->info("Storage layer statement retry profile"); for (int i = 0; i < MAX_RETRIES-1; i++) { log->info("%2d: %d", i, retryStats[i]); retryStats[i] = 0; } log->info("Too many retries: %d", retryStats[MAX_RETRIES-1]); retryStats[MAX_RETRIES-1] = 0; } #endif if (retries >1) { ostringstream threadId; threadId << std::this_thread::get_id(); Logger::getLogger()->debug("%s - Completed retries :%d: :%s:", __FUNCTION__, retries, threadId.str().c_str() ); } if (rc == SQLITE_LOCKED) { Logger::getLogger()->error("Database still locked after maximum retries"); } if (rc == SQLITE_BUSY) { Logger::getLogger()->error("Database still busy after maximum retries"); } return rc; } #ifndef SQLITE_SPLIT_READINGS /** * Perform a delete against a common table * */ int Connection::deleteRows(const string& schema, const string& table, const string& condition) { // Default template parameter uses UTF8 and MemoryPoolAllocator. Document document; SQLBuffer sql; vector<string> asset_codes; if (!m_schemaManager->exists(dbHandle, schema)) { raiseError("delete", "Schema %s does not exist, unable to delete from table %s", schema.c_str(), table.c_str()); return false; } sql.append("DELETE FROM "); sql.append(schema); sql.append('.'); sql.append(table); if (! 
condition.empty()) { sql.append(" WHERE "); if (document.Parse(condition.c_str()).HasParseError()) { raiseError("delete", "Failed to parse JSON payload"); return -1; } else { if (document.HasMember("where")) { if (!jsonWhereClause(document["where"], sql, asset_codes)) { return -1; } } else { raiseError("delete", "JSON does not contain where clause"); return -1; } } } sql.append(';'); const char *query = sql.coalesce(); logSQL("CommonDelete", query); char *zErrMsg = NULL; int delete_rows; int rc; // Exec the DELETE statement: no callback, no result set m_writeAccessOngoing.fetch_add(1); rc = SQLexec(dbHandle, table, query, NULL, NULL, &zErrMsg); m_writeAccessOngoing.fetch_sub(1); if (m_writeAccessOngoing == 0) db_cv.notify_all(); // Check result code if (rc == SQLITE_OK) { // Success. Release memory for 'query' var delete[] query; return sqlite3_changes(dbHandle); } else { raiseError("delete", zErrMsg); sqlite3_free(zErrMsg); Logger::getLogger()->error("SQL statement: %s", query); delete[] query; // Failure return -1; } } #endif #ifndef SQLITE_SPLIT_READINGS /** * Create snapshot of a common table * * @param table The table to snapshot * @param id The snapshot id * @return -1 on error, >= 0 on success * * The new created table name has the name: * $table_snap$id */ int Connection::create_table_snapshot(const string& table, const string& id) { string query = "CREATE TABLE fledge."; query += table + "_snap" + id + " AS SELECT * FROM fledge." 
/**
 * Set the contents of a common table from a snapshot
 *
 * The table is purged and re-populated from the snapshot table
 * $table_snap$id inside a single transaction, so readers never observe a
 * partially loaded table. If the statement fails and a transaction is
 * still open it is rolled back.
 *
 * @param table	The table to fill
 * @param id	The snapshot id of the table
 * @return -1 on error, >= 0 on success
 */
int Connection::load_table_snapshot(const string& table, const string& id)
{
	string purgeQuery = "DELETE FROM fledge." + table;
	string query = "BEGIN TRANSACTION; ";
	query += purgeQuery +"; INSERT INTO fledge." + table;
	query += " SELECT * FROM fledge." + table + "_snap" + id;
	query += "; COMMIT TRANSACTION;";

	logSQL("LoadTableSnapshot", query.c_str());

	char* zErrMsg = NULL;
	int rc = SQLexec(dbHandle,
			 table,
			 query.c_str(),
			 NULL,
			 NULL,
			 &zErrMsg);

	// Check result code
	if (rc == SQLITE_OK)
	{
		return 1;
	}
	else
	{
		raiseError("load_table_snapshot", zErrMsg);
		sqlite3_free(zErrMsg);

		// transaction is still open, do rollback
		// (autocommit == 0 means an explicit transaction is active)
		if (sqlite3_get_autocommit(dbHandle) == 0)
		{
			rc = SQLexec(dbHandle,
				     table,
				     "ROLLBACK TRANSACTION;",
				     NULL,
				     NULL,
				     &zErrMsg);
			if (rc != SQLITE_OK)
			{
				raiseError("rollback for load_table_snapshot", zErrMsg);
				sqlite3_free(zErrMsg);
			}
		}
		return -1;
	}
}
+ table + "_snap" + id; logSQL("DeleteTableSnapshot", query.c_str()); char* zErrMsg = NULL; int rc = SQLexec(dbHandle, table, query.c_str(), NULL, NULL, &zErrMsg); // Check result code if (rc == SQLITE_OK) { return 1; } else { raiseError("delete_table_snapshot", zErrMsg); sqlite3_free(zErrMsg); return -1; } } /** * Get list of snapshots for a given common table * * @param table The given table name */ bool Connection::get_table_snapshots(const string& table, string& resultSet) { SQLBuffer sql; try { if (dbHandle == NULL) { raiseError("retrieve", "No SQLite 3 db connection available"); return false; } sql.append("SELECT REPLACE(name, '"); sql.append(table); sql.append("_snap', '') AS id FROM sqlite_master WHERE type='table' AND name LIKE '"); sql.append(table); sql.append("_snap%';"); const char *query = sql.coalesce(); char *zErrMsg = NULL; int rc; sqlite3_stmt *stmt; logSQL("GetTableSnapshots", query); // Prepare the SQL statement and get the result set rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); if (rc != SQLITE_OK) { raiseError("get_table_snapshots", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); delete[] query; return false; } // Call result set mapping rc = mapResultSet(stmt, resultSet); // Delete result set sqlite3_finalize(stmt); // Check result set mapping errors if (rc != SQLITE_DONE) { raiseError("get_table_snapshots", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", query); delete[] query; // Failure return false; } // Release memory for 'query' var delete[] query; // Success return true; } catch (exception e) { raiseError("get_table_snapshots", "Internal error: %s", e.what()); // Failure return false; } } /** * Create schema and populate with tables and indexes as defined in the JSON schema * definition. 
* * @param schema The schema defintion as a JSON document containing information about schema of tables to create * @return true if the schema was created */ bool Connection::createSchema(const std::string &schema) { return m_schemaManager->create(dbHandle, schema); } /** * Execute a SQLite VACUUM command on the database */ bool Connection::vacuum() { char* zErrMsg = NULL; // Exec the statement int rc = SQLexec(dbHandle, "database", "VACUUM;", NULL, NULL, &zErrMsg); // Check result if (rc != SQLITE_OK) { const char* errMsg = "Failed to vacuum database "; Logger::getLogger()->error("%s: error %s", errMsg, zErrMsg); sqlite3_free(zErrMsg); return false; } else { Logger::getLogger()->info("Database vacuum complete"); } return true; } #endif /* * Return the first word in a SQL statement, ie the operation that is being executed. * * @param sql The complete SQL statement * @return string The operation */ string Connection::operation(const char *sql) { const char *p1 = sql; char buf[40], *p2 = buf; while (*p1 && !isspace(*p1) && p2 - buf < 40) *p2++ = *p1++; *p2 = '\0'; return string(buf); } /** * In the case of a join add the tables to select from for all the tables in * the join * * @param schema The schema we are using * @param document The query we are processing * @param sql The SQLBuffer we are writing * @param level The table number we are processing */ bool Connection::appendTables(const string& schema, const Value& document, SQLBuffer& sql, int level) { string tag = "t" + to_string(level); if (document.HasMember("join")) { const Value& join = document["join"]; if (join.HasMember("table")) { const Value& table = join["table"]; if (!table.HasMember("name")) { raiseError("commonRetrieve", "Joining table is missing a table name"); return false; } const Value& name = table["name"]; if (!name.IsString()) { raiseError("commonRetrieve", "Joining table name is not a string"); return false; } sql.append(", "); sql.append(schema); sql.append('.'); 
sql.append(name.GetString()); sql.append(" "); sql.append(tag); if (join.HasMember("query")) { const Value& query = join["query"]; appendTables(schema, query, sql, ++level); } else { raiseError("commonRetrieve", "Join is missing a join query definition"); return false; } } else { raiseError("commonRetrieve", "Join is missing a table definition"); return false; } } return true; } /** * Recurse down and add the where cluase and join terms for each * new table joined to the query * * @param query The JSON query * @param sql The SQLBuffer we are writing the data to * @param asset_codes The asset codes * @param level The nestign level of the joined table */ bool Connection::processJoinQueryWhereClause(const Value& query, SQLBuffer& sql, std::vector<std::string> &asset_codes, int level) { string tag = "t" + to_string(level) + "."; if (!jsonWhereClause(query["where"], sql, asset_codes, true, tag)) { return false; } if (query.HasMember("join")) { // Now and the join condition itself string col0, col1; const Value& join = query["join"]; if (join.HasMember("on") && join["on"].IsString()) { col0 = join["on"].GetString(); } else { return false; } if (join.HasMember("table")) { const Value& table = join["table"]; if (table.HasMember("column") && table["column"].IsString()) { col1 = table["column"].GetString(); } else { raiseError("Joined query", "Missing join column in table"); return false; } } sql.append(" AND "); sql.append(tag); sql.append(col0); sql.append(" = t"); sql.append(level + 1); sql.append("."); sql.append(col1); sql.append(" "); if (join.HasMember("query") && join["query"].IsObject()) { sql.append(" AND "); const Value& query = join["query"]; processJoinQueryWhereClause(query, sql, asset_codes, level + 1); } } return true; } /** * In the case of a join add the columns to select from for all the tables in * the join * * @param document The query we are processing * @param sql The SQLBuffer we are writing * @param level The table number we are processing */ bool 
Connection::selectColumns(const Value& document, SQLBuffer& sql, int level) { SQLBuffer jsonConstraints; string tag = "t" + to_string(level) + "."; if (document.HasMember("return")) { int col = 0; const Value& columns = document["return"]; if (! columns.IsArray()) { raiseError("retrieve", "The property return must be an array"); return false; } if (document.HasMember("modifier")) { sql.append(document["modifier"].GetString()); sql.append(' '); } for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr) { if (col) { sql.append(", "); } if (!itr->IsObject()) // Simple column name { sql.append(tag); sql.append(itr->GetString()); } else { if (itr->HasMember("column")) { if (! (*itr)["column"].IsString()) { raiseError("rerieve", "column must be a string"); return false; } if (itr->HasMember("format")) { if (! (*itr)["format"].IsString()) { raiseError("rerieve", "format must be a string"); return false; } // SQLite 3 date format. string new_format; applyColumnDateFormat((*itr)["format"].GetString(), tag + (*itr)["column"].GetString(), new_format, true); // Add the formatted column or use it as is sql.append(new_format); } else if (itr->HasMember("timezone")) { if (! (*itr)["timezone"].IsString()) { raiseError("rerieve", "timezone must be a string"); return false; } // SQLite3 doesnt support time zone formatting if (strcasecmp((*itr)["timezone"].GetString(), "utc") != 0) { raiseError("retrieve", "SQLite3 plugin does not support timezones in qeueries"); return false; } else { sql.append("strftime('" F_DATEH24_MS "', "); sql.append(tag); sql.append((*itr)["column"].GetString()); sql.append(", 'utc')"); } } else { sql.append(tag); sql.append((*itr)["column"].GetString()); } sql.append(' '); } else if (itr->HasMember("json")) { const Value& json = (*itr)["json"]; if (! 
returnJson(json, sql, jsonConstraints)) { return false; } } else { raiseError("retrieve", "return object must have either a column or json property"); return false; } if (itr->HasMember("alias")) { sql.append(" AS \""); sql.append((*itr)["alias"].GetString()); sql.append('"'); } } col++; } } else { sql.append('*'); return true; } if (document.HasMember("join")) { const Value& join = document["join"]; if (join.HasMember("query")) { const Value& query = join["query"]; sql.append(", "); if (!selectColumns(query, sql, ++level)) { raiseError("commonRetrieve", "Join failed to add select columns"); return false; } } } return true; } ================================================ FILE: C/plugins/storage/sqlitelb/common/connection_manager.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <connection_manager.h> #include <connection.h> #include <unistd.h> #include <disk_monitor.h> #include <utils.h> #include <sqlite_common.h> ConnectionManager *ConnectionManager::instance = 0; /** * Background thread entry point */ static void managerBackground(void *arg) { ConnectionManager *mgr = (ConnectionManager *)arg; mgr->background(); } /** * Default constructor for the connection manager. 
/**
 * Default constructor for the connection manager.
 *
 * Determines the locations of the main and readings database files
 * (overridable via the DEFAULT_SQLITE_DB_FILE and
 * DEFAULT_SQLITE_DB_READINGS_FILE environment variables), creates a disk
 * space monitor for those paths and starts the background maintenance
 * thread.
 */
ConnectionManager::ConnectionManager() :
	m_shutdown(false),
	m_vacuumInterval(6 * 60 * 60),	// Default: vacuum every 6 hours
	m_purgeBlockSize(10000),
	m_diskSpaceMonitor(NULL)
{
	lastError.message = NULL;
	lastError.entryPoint = NULL;
	// SQL statement tracing is enabled via an environment variable
	if (getenv("FLEDGE_TRACE_SQL"))
		m_trace = true;
	else
		m_trace = false;

	std::string dbPath, dbPathReadings;
	const char *defaultConnection = getenv("DEFAULT_SQLITE_DB_FILE");
	const char *defaultReadingsConnection = getenv("DEFAULT_SQLITE_DB_READINGS_FILE");

	if (defaultConnection == NULL)
	{
		// Set DB base path
		dbPath = getDataDir();
		// Add the filename
		dbPath += _DB_NAME;
	}
	else
	{
		dbPath = defaultConnection;
	}

	if (defaultReadingsConnection == NULL)
	{
		// Set DB base path
		dbPathReadings = getDataDir();
		// Add the filename
		dbPathReadings += READINGS_DB_FILE_NAME;
	}
	else
	{
		dbPathReadings = defaultReadingsConnection;
	}

	// Monitor disk usage for both database files
	m_diskSpaceMonitor = new DiskSpaceMonitor(dbPath, dbPathReadings);
	// Background thread runs periodic maintenance (vacuum, disk checks)
	m_background = new std::thread(managerBackground, this);
}
* * @param delta The number of connections to add to the pool */ void ConnectionManager::growPool(unsigned int delta) { while (delta-- > 0) { Connection *conn = new Connection(); conn->setPurgeBlockSize(m_purgeBlockSize); if (m_trace) conn->setTrace(true); idleLock.lock(); idle.push_back(conn); idleLock.unlock(); } } /** * Attempt to shrink the number of connections in the idle pool * * @param delta Number of connections to attempt to remove * @return The number of connections removed. */ unsigned int ConnectionManager::shrinkPool(unsigned int delta) { unsigned int removed = 0; Connection *conn; while (delta-- > 0) { idleLock.lock(); conn = idle.back(); idle.pop_back(); idleLock.unlock(); if (conn) { delete conn; removed++; } else { break; } } return removed; } /** * Allocate a connection from the idle pool. If * no connection is available add a new connection */ Connection *ConnectionManager::allocate() { Connection *conn = 0; idleLock.lock(); if (idle.empty()) { conn = new Connection(); } else { conn = idle.front(); idle.pop_front(); } idleLock.unlock(); if (conn) { inUseLock.lock(); inUse.push_front(conn); inUseLock.unlock(); } return conn; } /** * Release a connection back to the idle pool for * reallocation. * * @param conn The connection to release. */ void ConnectionManager::release(Connection *conn) { inUseLock.lock(); inUse.remove(conn); inUseLock.unlock(); idleLock.lock(); idle.push_back(conn); idleLock.unlock(); } /** * Set the last error information for a plugin. 
* * @param source The source of the error * @param description The error description * @param retryable Flag to determien if the error condition is transient */ void ConnectionManager::setError(const char *source, const char *description, bool retryable) { errorLock.lock(); if (lastError.entryPoint) free(lastError.entryPoint); if (lastError.message) free(lastError.message); lastError.retryable = retryable; lastError.entryPoint = strdup(source); lastError.message = strdup(description); errorLock.unlock(); } /** * Background thread used to execute periodic tasks and oversee the database activity. * * We will runt he SQLite vacuum command periodically to allow space to be reclaimed */ void ConnectionManager::background() { time_t nextVacuum = time(0) + m_vacuumInterval; while (!m_shutdown) { if (m_diskSpaceMonitor) m_diskSpaceMonitor->periodic(15); // Called with the interval we sleep for sleep(15); time_t tim = time(0); if (m_vacuumInterval && tim > nextVacuum) { Connection *con = allocate(); con->vacuum(); release(con); nextVacuum = time(0) + m_vacuumInterval; } } } ================================================ FILE: C/plugins/storage/sqlitelb/common/include/connection.h ================================================ #ifndef _CONNECTION_H #define _CONNECTION_H /* * Fledge storage service. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <sql_buffer.h> #include <string> #include <rapidjson/document.h> #include <sqlite3.h> #include <mutex> #include <vector> #include <reading_stream.h> #ifndef MEMORY_READING_PLUGIN #include <schema.h> #endif #define _DB_NAME "/fledge.db" #define READINGS_DB_NAME_BASE "readings" #define READINGS_DB_FILE_NAME "/" READINGS_DB_NAME_BASE ".db" #define READINGS_DB READINGS_DB_NAME_BASE #define READINGS_TABLE "readings" #define READINGS_TABLE_MEM READINGS_TABLE #define MAX_RETRIES 80 // Maximum no. 
of retries when a lock is encountered #define RETRY_BACKOFF 100 // Multipler to backoff DB retry on lock #define RETRY_BACKOFF_EXEC 1000 // Multipler to backoff DB retry on lock #define LEN_BUFFER_DATE 100 #define F_TIMEH24_S "%H:%M:%S" #define F_DATEH24_S "%Y-%m-%d %H:%M:%S" #define F_DATEH24_M "%Y-%m-%d %H:%M" #define F_DATEH24_H "%Y-%m-%d %H" // This is the default datetime format in Fledge: 2018-05-03 18:15:00.622 #define F_DATEH24_MS "%Y-%m-%d %H:%M:%f" // Format up to seconds #define F_DATEH24_SEC "%Y-%m-%d %H:%M:%S" #define SQLITE3_NOW "strftime('%Y-%m-%d %H:%M:%f', 'now', 'localtime')" // The default precision is milliseconds, it adds microseconds and timezone #define SQLITE3_NOW_READING "strftime('%Y-%m-%d %H:%M:%f000+00:00', 'now')" #define SQLITE3_FLEDGE_DATETIME_TYPE "DATETIME" // Set plugin name for log messages #ifndef PLUGIN_LOG_NAME #define PLUGIN_LOG_NAME "SQLite3" #endif int dateCallback(void *data, int nCols, char **colValues, char **colNames); bool applyColumnDateFormat(const std::string& inFormat, const std::string& colName, std::string& outFormat, bool roundMs = false); bool applyColumnDateFormatLocaltime(const std::string& inFormat, const std::string& colName, std::string& outFormat, bool roundMs = false); int rowidCallback(void *data, int nCols, char **colValues, char **colNames); int selectCallback(void *data, int nCols, char **colValues, char **colNames); int countCallback(void *data, int nCols, char **colValues, char **colNames); bool applyDateFormat(const std::string& inFormat, std::string& outFormat); class Connection { public: Connection(); ~Connection(); #ifndef SQLITE_SPLIT_READINGS bool createSchema(const std::string& schema); bool retrieve(const std::string& schema, const std::string& table, const std::string& condition, std::string& resultSet); int insert(const std::string& schema, const std::string& table, const std::string& data); int update(const std::string& schema, const std::string& table, const std::string& data); int 
deleteRows(const std::string& schema, const std::string& table, const std::string& condition); int create_table_snapshot(const std::string& table, const std::string& id); int load_table_snapshot(const std::string& table, const std::string& id); int delete_table_snapshot(const std::string& table, const std::string& id); bool get_table_snapshots(const std::string& table, std::string& resultSet); #endif int appendReadings(const char *readings); int readingStream(ReadingStream **readings, bool commit); bool fetchReadings(unsigned long id, unsigned int blksize, std::string& resultSet); bool retrieveReadings(const std::string& condition, std::string& resultSet); unsigned int purgeReadings(unsigned long age, unsigned int flags, unsigned long sent, std::string& results); unsigned int purgeReadingsByRows(unsigned long rowcount, unsigned int flags, unsigned long sent, std::string& results); long tableSize(const std::string& table); void setTrace(bool); bool formatDate(char *formatted_date, size_t formatted_date_size, const char *date); bool aggregateQuery(const rapidjson::Value& payload, std::string& resultSet); bool getNow(std::string& Now); unsigned int purgeReadingsAsset(const std::string& asset); bool vacuum(); #ifdef MEMORY_READING_PLUGIN bool loadDatabase(const std::string& filname); bool saveDatabase(const std::string& filname); #endif void setPurgeBlockSize(unsigned long purgeBlockSize) { m_purgeBlockSize = purgeBlockSize; }; private: #ifndef MEMORY_READING_PLUGIN SchemaManager *m_schemaManager; #endif bool m_streamOpenTransaction; int m_queuing; std::mutex m_qMutex; unsigned long m_purgeBlockSize; std::string operation(const char *sql); int SQLexec(sqlite3 *db, const std::string& table, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg); int SQLstep(sqlite3_stmt *statement); bool m_logSQL; void raiseError(const char *operation, const char *reason,...); sqlite3 *dbHandle; int mapResultSet(void *res, std::string& resultSet); bool 
jsonWhereClause(const rapidjson::Value& whereClause, SQLBuffer&, std::vector<std::string> &asset_codes, bool convertLocaltime = false, std::string prefix = ""); bool jsonModifiers(const rapidjson::Value&, SQLBuffer&, bool isTableReading = false); bool jsonAggregates(const rapidjson::Value&, const rapidjson::Value&, SQLBuffer&, SQLBuffer&, bool isTableReading = false); bool returnJson(const rapidjson::Value&, SQLBuffer&, SQLBuffer&); char *trim(char *str); const std::string escape(const std::string&); bool applyColumnDateTimeFormat(sqlite3_stmt *pStmt, int i, std::string& newDate); void logSQL(const char *, const char *); bool appendTables(const std::string& schema, const rapidjson::Value& document, SQLBuffer& sql, int level); bool processJoinQueryWhereClause(const rapidjson::Value& query, SQLBuffer& sql, std::vector<std::string> &asset_codes, int level); bool selectColumns(const rapidjson::Value& document, SQLBuffer& sql, int level); }; #endif ================================================ FILE: C/plugins/storage/sqlitelb/common/include/connection_manager.h ================================================ #ifndef _CONNECTION_MANAGER_H #define _CONNECTION_MANAGER_H /* * Fledge storage service. 
* * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <plugin_api.h> #include <list> #include <mutex> #include <thread> class Connection; class DiskSpaceMonitor; /** * Singleton class to manage SQLite3 connection pool */ class ConnectionManager { public: static ConnectionManager *getInstance(); void growPool(unsigned int); unsigned int shrinkPool(unsigned int); Connection *allocate(); void release(Connection *); void shutdown(); void setError(const char *, const char *, bool); PLUGIN_ERROR *getError() { return &lastError; } void background(); void setVacuumInterval(long hours) { m_vacuumInterval = 60 * 60 * hours; }; void setPersist(bool persist, const std::string& filename = "") { m_persist = persist; m_filename = filename; } bool persist() { return m_persist; }; std::string filename() { return m_filename; }; void setPurgeBlockSize(unsigned long purgeBlockSize); protected: ConnectionManager(); private: static ConnectionManager *instance; protected: std::list<Connection *> idle; std::list<Connection *> inUse; std::mutex idleLock; std::mutex inUseLock; std::mutex errorLock; PLUGIN_ERROR lastError; bool m_trace; bool m_shutdown; std::thread *m_background; long m_vacuumInterval; bool m_persist; std::string m_filename; unsigned long m_purgeBlockSize; DiskSpaceMonitor *m_diskSpaceMonitor; }; #endif ================================================ FILE: C/plugins/storage/sqlitelb/common/readings.cpp ================================================ /* * Fledge storage service. 
* * Copyright (c) 2018 OSIsoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <math.h> #include <connection.h> #include <connection_manager.h> #include <sqlite_common.h> #include <reading_stream.h> #include <random> // 1 enable performance tracking #define INSTRUMENT 0 #if INSTRUMENT #include <sys/time.h> #endif /* * The number of readings to insert in a single prepared statement */ #define APPEND_BATCH_SIZE 100 /* * JSON parsing requires a lot of mempry allocation, which is slow and causes * bottlenecks with thread synchronisation. RapidJSON supports in-situ parsing * whereby it will reuse the storage of the string it is parsing to store the * keys and string values of the parsed JSON. This is destructive on the buffer. * However it can be quicker to maek a copy of the raw string and then do in-situ * parsing on that copy of the string. * See http://rapidjson.org/md_doc_dom.html#InSituParsing * * Define a threshold length for the append readings to switch to using in-situ * parsing of the JSON to save on memory allocation overheads. Define as 0 to * disable the in-situ parsing. */ #define INSITU_THRESHOLD 10240 // Decode stream data #define RDS_USER_TIMESTAMP(stream, x) stream[x]->userTs #define RDS_ASSET_CODE(stream, x) stream[x]->assetCode #define RDS_PAYLOAD(stream, x) &(stream[x]->assetCode[0]) + stream[x]->assetCodeLength // Retry mechanism #define PREP_CMD_MAX_RETRIES 100 // Maximum no. of retries when a lock is encountered #define PREP_CMD_RETRY_BASE 20 // Base time to wait for #define PREP_CMD_RETRY_BACKOFF 20 // Variable time to wait for /* * Control the way purge deletes readings. The block size sets a limit as to how many rows * get deleted in each call, whilst the sleep interval controls how long the thread sleeps * between deletes. The idea is to not keep the database locked too long and allow other threads * to have access to the database between blocks. 
*/ #define PURGE_SLEEP_MS 500 #define PURGE_DELETE_BLOCK_SIZE 20 #define TARGET_PURGE_BLOCK_DEL_TIME (70*1000) // 70 msec #define PURGE_BLOCK_SZ_GRANULARITY 5 // 5 rows #define MIN_PURGE_DELETE_BLOCK_SIZE 20 #define MAX_PURGE_DELETE_BLOCK_SIZE 1500 #define RECALC_PURGE_BLOCK_SIZE_NUM_BLOCKS 30 // recalculate purge block size after every 30 blocks #define PURGE_SLOWDOWN_AFTER_BLOCKS 5 #define PURGE_SLOWDOWN_SLEEP_MS 500 #define SECONDS_PER_DAY "86400.0" // 2440587.5 is the julian day at 1/1/1970 0:00 UTC. #define JULIAN_DAY_START_UNIXTIME "2440587.5" //#ifndef PLUGIN_LOG_NAME //#define PLUGIN_LOG_NAME "SQLite 3" //#endif /** * SQLite3 storage plugin for Fledge */ using namespace std; using namespace rapidjson; #define CONNECT_ERROR_THRESHOLD 5*60 // 5 minutes /* * The following allows for conditional inclusion of code that tracks the top queries * run by the storage plugin and the number of times a particular statement has to * be retried because of the database being busy./ */ #define DO_PROFILE 0 #define DO_PROFILE_RETRIES 0 #if DO_PROFILE #include <profile.h> #define TOP_N_STATEMENTS 10 // Number of statements to report in top n #define RETRY_REPORT_THRESHOLD 1000 // Report retry statistics every X calls QueryProfile profiler(TOP_N_STATEMENTS); unsigned long retryStats[MAX_RETRIES] = { 0,0,0,0,0,0,0,0,0,0 }; unsigned long numStatements = 0; int maxQueue = 0; #endif static std::atomic<int> m_waiting(0); static std::atomic<int> m_writeAccessOngoing(0); static std::mutex db_mutex; static std::condition_variable db_cv; static int purgeBlockSize = PURGE_DELETE_BLOCK_SIZE; #define START_TIME std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now(); #define END_TIME std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now(); \ auto usecs = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count(); static time_t connectErrorTime = 0; /** * Check whether to compute timebucket query with 
min,max,avg for all datapoints * * @param payload JSON payload * @return True if aggregation is 'all' */ bool aggregateAll(const Value& payload) { if (payload.HasMember("aggregate") && payload["aggregate"].IsObject()) { const Value& agg = payload["aggregate"]; if (agg.HasMember("operation") && (strcmp(agg["operation"].GetString(), "all") == 0)) { return true; } } return false; } /** * Build, exucute and return data of a timebucket query with min,max,avg for all datapoints * * @param payload JSON object for timebucket query * @param resultSet JSON Output buffer * @return True of success, false on any error */ bool Connection::aggregateQuery(const Value& payload, string& resultSet) { vector<string> asset_codes; if (!payload.HasMember("where") || !payload.HasMember("timebucket")) { raiseError("retrieve", "aggregateQuery is missing " "'where' and/or 'timebucket' properties"); return false; } SQLBuffer sql; sql.append("SELECT asset_code, "); double size = 1; string timeColumn; // Check timebucket object if (payload.HasMember("timebucket")) { const Value& bucket = payload["timebucket"]; if (!bucket.HasMember("timestamp")) { raiseError("retrieve", "aggregateQuery is missing " "'timestamp' property for 'timebucket'"); return false; } // Time column timeColumn = bucket["timestamp"].GetString(); // Bucket size if (bucket.HasMember("size")) { size = atof(bucket["size"].GetString()); if (!size) { size = 1; } } // Time format for output string newFormat; if (bucket.HasMember("format") && size >= 1) { applyColumnDateFormatLocaltime(bucket["format"].GetString(), "timestamp", newFormat, true); sql.append(newFormat); } else { if (size < 1) { // sub-second granularity to time bucket size: // force output formatting with microseconds newFormat = "strftime('%Y-%m-%d %H:%M:%S', " + timeColumn + ", 'localtime') || substr(" + timeColumn + ", instr(" + timeColumn + ", '.'), 7)"; sql.append(newFormat); } else { sql.append("timestamp"); } } // Time output alias if 
(bucket.HasMember("alias")) { sql.append(" AS "); sql.append(bucket["alias"].GetString()); } } // JSON format aggregated data sql.append(", '{' || group_concat('\"' || x || '\" : ' || resd, ', ') || '}' AS reading "); // subquery sql.append("FROM ( SELECT x, asset_code, max(timestamp) AS timestamp, "); // Add min sql.append("'{\"min\" : ' || min(theval) || ', "); // Add max sql.append("\"max\" : ' || max(theval) || ', "); // Add avg sql.append("\"average\" : ' || avg(theval) || ', "); // Add count sql.append("\"count\" : ' || count(theval) || ', "); // Add sum sql.append("\"sum\" : ' || sum(theval) || '}' AS resd "); if (size < 1) { // Add max(user_ts) sql.append(", max(" + timeColumn + ") AS " + timeColumn + " "); } // subquery sql.append("FROM ( SELECT asset_code, "); sql.append(timeColumn); if (size >= 1) { sql.append(", datetime("); } else { sql.append(", ("); } // Size formatted string string size_format; if (fmod(size, 1.0) == 0.0) { size_format = to_string(int(size)); } else { size_format = to_string(size); } // Add timebucket size // Unix Time is (Julian Day - JulianDay(1/1/1970 0:00 UTC) * Seconds_per_day if (size != 1) { sql.append(size_format); sql.append(" * round((julianday("); sql.append(timeColumn); sql.append(") - " + string(JULIAN_DAY_START_UNIXTIME) + ") * " + string(SECONDS_PER_DAY) + " / "); sql.append(size_format); sql.append(")"); } else { sql.append("round((julianday("); sql.append(timeColumn); sql.append(") - " + string(JULIAN_DAY_START_UNIXTIME) + ") * " + string(SECONDS_PER_DAY) + " / 1)"); } if (size >= 1) { sql.append(", 'unixepoch') AS \"timestamp\", reading, "); } else { sql.append(") AS \"timestamp\", reading, "); } // Get all datapoints in 'reading' field sql.append("json_each.key AS x, json_each.value AS theval FROM " READINGS_DB_NAME_BASE ".readings, json_each(readings.reading) "); // Add where condition sql.append("WHERE "); if (!jsonWhereClause(payload["where"], sql, asset_codes)) { raiseError("retrieve", "aggregateQuery: failure 
while building WHERE clause"); return false; } // close subquery sql.append(") tmp "); // Add group by // Unix Time is (Julian Day - JulianDay(1/1/1970 0:00 UTC) * Seconds_per_day sql.append(" GROUP BY x, asset_code, "); sql.append("round((julianday("); sql.append(timeColumn); sql.append(") - " + string(JULIAN_DAY_START_UNIXTIME) + ") * " + string(SECONDS_PER_DAY) + " / "); if (size != 1) { sql.append(size_format); } else { sql.append('1'); } sql.append(") "); // close subquery sql.append(") tbl "); // Add final group and sort sql.append("GROUP BY timestamp, asset_code ORDER BY timestamp DESC"); // Add limit if (payload.HasMember("limit")) { if (!payload["limit"].IsInt()) { raiseError("retrieve", "aggregateQuery: limit must be specfied as an integer"); return false; } sql.append(" LIMIT "); try { sql.append(payload["limit"].GetInt()); } catch (exception e) { raiseError("retrieve", "aggregateQuery: bad value for limit parameter: %s", e.what()); return false; } } sql.append(';'); // Execute query const char *query = sql.coalesce(); int rc; sqlite3_stmt *stmt; logSQL("CommonRetrieve", query); // Prepare the SQL statement and get the result set rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL); // Release memory for 'query' var delete[] query; if (rc != SQLITE_OK) { raiseError("retrieve", sqlite3_errmsg(dbHandle)); return false; } // Call result set mapping rc = mapResultSet(stmt, resultSet); // Delete result set sqlite3_finalize(stmt); // Check result set mapping errors if (rc != SQLITE_DONE) { raiseError("retrieve", sqlite3_errmsg(dbHandle)); // Failure return false; } return true; } /** * Append a stream of readings to SQLite db * * @param readings readings to store into the SQLite db * @param commit if true a database commit is executed and a new transaction will be opened at the next execution * */ int Connection::readingStream(ReadingStream **readings, bool commit) { // Row defintion related int i; bool add_row = false; const char *user_ts; string now; 
	// Buffers used to assemble the timestamp: seconds part + ".uuuuuu" microseconds part
	char ts[60], micro_s[10];
	char formatted_date[LEN_BUFFER_DATE] = {0};
	struct tm timeinfo;
	const char *asset_code;
	const char *payload;
	string reading;

	// Retry mechanism
	int retries = 0;
	int sleep_time_ms = 0;

	// SQLite related
	sqlite3_stmt *stmt, *batch_stmt;
	int sqlite3_resut;
	int rowNumber = -1;

#if INSTRUMENT
	struct timeval start, t1, t2, t3, t4, t5;
#endif

	// Single row insert; the batch variant below appends APPEND_BATCH_SIZE value tuples
	const char *sql_cmd = "INSERT INTO " READINGS_DB_NAME_BASE ".readings ( user_ts, asset_code, reading ) VALUES (?,?,?)";
	string cmd = sql_cmd;
	for (int i = 0; i < APPEND_BATCH_SIZE - 1; i++)
	{
		cmd.append(", (?,?,?)");
	}

	// NOTE(review): 'stmt' is prepared twice (here and again in the check just below);
	// the first prepared statement is leaked. The prepare of 'batch_stmt' is never checked.
	sqlite3_prepare_v2(dbHandle, sql_cmd, strlen(sql_cmd), &stmt, NULL);
	sqlite3_prepare_v2(dbHandle, cmd.c_str(), cmd.length(), &batch_stmt, NULL);
	if (sqlite3_prepare_v2(dbHandle, sql_cmd, strlen(sql_cmd), &stmt, NULL) != SQLITE_OK)
	{
		raiseError("readingStream", sqlite3_errmsg(dbHandle));
		return -1;
	}

	// The handling of the commit parameter is overridden as using a pool of connections every execution receives
	// a different one, so a commit at every run is executed.
	m_streamOpenTransaction = true;
	commit = true;

	if (m_streamOpenTransaction)
	{
		if (sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL) != SQLITE_OK)
		{
			raiseError("readingStream", sqlite3_errmsg(dbHandle));
			return -1;
		}
		m_streamOpenTransaction = false;
	}

#if INSTRUMENT
	gettimeofday(&start, NULL);
#endif

	// Count the readings in the NULL terminated array
	int nReadings;
	for (nReadings = 0; readings[nReadings]; nReadings++);

	try
	{
		// Full batches first, remainder handled one row at a time below
		unsigned int nBatches = nReadings / APPEND_BATCH_SIZE;
		int curReading = 0;
		for (int batch = 0; batch < nBatches; batch++)
		{
			int varNo = 1;
			for (int readingNo = 0; readingNo < APPEND_BATCH_SIZE; readingNo++)
			{
				add_row = true;

				// Handles - asset_code
				asset_code = RDS_ASSET_CODE(readings, curReading);

				// Handles - reading
				payload = RDS_PAYLOAD(readings, curReading);
				reading = escape(payload);

				// Handles - user_ts: format seconds in UTC then append microseconds
				memset(&timeinfo, 0, sizeof(struct tm));
				gmtime_r(&RDS_USER_TIMESTAMP(readings, curReading).tv_sec, &timeinfo);
				std::strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", &timeinfo);
				snprintf(micro_s, sizeof(micro_s), ".%06lu", RDS_USER_TIMESTAMP(readings, curReading).tv_usec);

				formatted_date[0] = {0};
				strncat(ts, micro_s, 10);
				user_ts = ts;
				if (strcmp(user_ts, "now()") == 0)
				{
					getNow(now);
					user_ts = now.c_str();
				}
				else
				{
					if (!formatDate(formatted_date, sizeof(formatted_date), user_ts))
					{
						raiseError("streamReadings", "Invalid date |%s|", user_ts);
						add_row = false;
					}
					else
					{
						user_ts = formatted_date;
					}
				}

				// NOTE(review): when add_row is false varNo is not advanced, so the
				// remaining parameters of the batch statement shift and the trailing
				// ones stay unbound (NULL) when the batch is stepped.
				if (add_row)
				{
					if (batch_stmt != NULL)
					{
						sqlite3_bind_text(batch_stmt, varNo++, user_ts, -1, SQLITE_STATIC);
						sqlite3_bind_text(batch_stmt, varNo++, asset_code, -1, SQLITE_STATIC);
						sqlite3_bind_text(batch_stmt, varNo++, reading.c_str(), -1, SQLITE_STATIC);
					}
				}
				curReading++;
			}
			// Write the batch
			retries = 0;
			sleep_time_ms = 0;

			// Retry mechanism in case SQLlite DB is locked
			do {
				// Insert the row using a lock to ensure one insert at time
				{
					m_writeAccessOngoing.fetch_add(1);
					//unique_lock<mutex> lck(db_mutex);
					sqlite3_resut = sqlite3_step(batch_stmt);
					m_writeAccessOngoing.fetch_sub(1);
					//db_cv.notify_all();
				}
				if (sqlite3_resut == SQLITE_LOCKED )
				{
					sleep_time_ms = PREP_CMD_RETRY_BASE + ((retries / 2 ) * (random() % PREP_CMD_RETRY_BACKOFF));
					retries++;

					// NOTE(review): 'i' here is the outer, never-assigned variable
					// declared at the top of the function, not the record index
					Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:",i, retries, sleep_time_ms);
					std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
				}
				if (sqlite3_resut == SQLITE_BUSY)
				{
					ostringstream threadId;
					threadId << std::this_thread::get_id();

					sleep_time_ms = PREP_CMD_RETRY_BASE + ((retries / 2 ) * (random() % PREP_CMD_RETRY_BACKOFF));
					retries++;

					Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms :%d:", threadId.str().c_str() ,i , retries, sleep_time_ms);
					std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
				}
			} while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY));

			if (sqlite3_resut == SQLITE_DONE)
			{
				rowNumber++;
				sqlite3_clear_bindings(batch_stmt);
				sqlite3_reset(batch_stmt);
			}
			else
			{
				raiseError("streamReadings", "Inserting a row into SQLite using a prepared command - asset_code :%s: error :%s: reading :%s: ", asset_code, sqlite3_errmsg(dbHandle), reading.c_str());
				sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
				m_streamOpenTransaction = true;
				return -1;
			}
		}
		// Remainder of the readings, inserted one row at a time
		while (readings[curReading])
		{
			add_row = true;

			// Handles - asset_code
			asset_code = RDS_ASSET_CODE(readings, curReading);

			// Handles - reading
			payload = RDS_PAYLOAD(readings, curReading);
			reading = escape(payload);

			// Handles - user_ts
			memset(&timeinfo, 0, sizeof(struct tm));
			gmtime_r(&RDS_USER_TIMESTAMP(readings, curReading).tv_sec, &timeinfo);
			std::strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", &timeinfo);
			snprintf(micro_s, sizeof(micro_s), ".%06lu", RDS_USER_TIMESTAMP(readings, curReading).tv_usec);

			formatted_date[0] = {0};
			strncat(ts, micro_s, 10);
			user_ts = ts;
			if (strcmp(user_ts, "now()") == 0)
			{
				getNow(now);
				user_ts = now.c_str();
			}
			else
			{
				if (!formatDate(formatted_date, sizeof(formatted_date), user_ts))
				{
					raiseError("streamReadings", "Invalid date |%s|", user_ts);
					add_row = false;
				}
				else
				{
					user_ts = formatted_date;
				}
			}

			// NOTE(review): guard tests 'batch_stmt' but the binds target 'stmt';
			// also when add_row is false the statement below is still stepped with
			// whatever bindings are left over from the previous iteration.
			if (add_row)
			{
				if (batch_stmt != NULL)
				{
					sqlite3_bind_text(stmt, 1, user_ts, -1, SQLITE_STATIC);
					sqlite3_bind_text(stmt, 2, asset_code, -1, SQLITE_STATIC);
					sqlite3_bind_text(stmt, 3, reading.c_str(), -1, SQLITE_STATIC);
				}
			}
			curReading++;

			retries =0;
			sleep_time_ms = 0;

			// Retry mechanism in case SQLlite DB is locked
			do {
				// Insert the row using a lock to ensure one insert at time
				{
					m_writeAccessOngoing.fetch_add(1);
					//unique_lock<mutex> lck(db_mutex);
					sqlite3_resut = sqlite3_step(stmt);
					m_writeAccessOngoing.fetch_sub(1);
					//db_cv.notify_all();
				}
				if (sqlite3_resut == SQLITE_LOCKED )
				{
					sleep_time_ms = PREP_CMD_RETRY_BASE + ((retries / 2 ) * (random() % PREP_CMD_RETRY_BACKOFF));
					retries++;

					// NOTE(review): 'i' is uninitialised here as well
					Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:",i, retries, sleep_time_ms);
					std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
				}
				if (sqlite3_resut == SQLITE_BUSY)
				{
					ostringstream threadId;
					threadId << std::this_thread::get_id();

					sleep_time_ms = PREP_CMD_RETRY_BASE + ((retries / 2 ) * (random() % PREP_CMD_RETRY_BACKOFF));
					retries++;

					Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms :%d:", threadId.str().c_str() ,i , retries, sleep_time_ms);
					std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
				}
			} while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY));

			if (sqlite3_resut == SQLITE_DONE)
			{
				rowNumber++;
				sqlite3_clear_bindings(stmt);
				sqlite3_reset(stmt);
			}
			else
			{
				raiseError("streamReadings", "Inserting a row into SQLite using a prepared command - asset_code :%s: error :%s: reading :%s: ", asset_code, sqlite3_errmsg(dbHandle), reading.c_str());
				sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
				m_streamOpenTransaction = true;
				return -1;
			}
		}
		rowNumber = curReading;
	}
	catch (exception e)
	{
		raiseError("appendReadings", "Inserting a row into SQLite using a prepared statement - error :%s:", e.what());
		sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
		m_streamOpenTransaction = true;
		return -1;
	}

#if INSTRUMENT
	gettimeofday(&t1, NULL);
#endif

	// commit is always true at this point (forced above because a connection pool is used)
	if (commit)
	{
		sqlite3_resut = sqlite3_exec(dbHandle, "END TRANSACTION", NULL, NULL, NULL);
		if (sqlite3_resut != SQLITE_OK)
		{
			raiseError("appendReadings", "Executing the commit of the transaction - error :%s:", sqlite3_errmsg(dbHandle));
			rowNumber = -1;
		}
		m_streamOpenTransaction = true;
	}

	if(stmt != NULL)
	{
		if (sqlite3_finalize(stmt) != SQLITE_OK)
		{
			raiseError("appendReadings","freeing SQLite in memory structure - error :%s:", sqlite3_errmsg(dbHandle));
		}
	}
	if(batch_stmt != NULL)
	{
		if (sqlite3_finalize(batch_stmt) != SQLITE_OK)
		{
			raiseError("appendReadings","freeing SQLite in memory batch structure - error :%s:", sqlite3_errmsg(dbHandle));
		}
	}

#if INSTRUMENT
	gettimeofday(&t2, NULL);
#endif

#if INSTRUMENT
	struct timeval tm;
	double timeT1, timeT2, timeT3;

	timersub(&t1, &start, &tm);
	timeT1 = tm.tv_sec + ((double)tm.tv_usec / 1000000);

	timersub(&t2, &t1, &tm);
	timeT2 = tm.tv_sec + ((double)tm.tv_usec / 1000000);

	Logger::getLogger()->warn("readingStream Timing with %d rows - stream handling %.3f seconds - commit/finalize %.3f seconds", rowNumber, timeT1, timeT2);
#endif

	return rowNumber;
}

/**
 * Append a set of readings to the readings table
 *
 * The payload is a JSON document with a "readings" array; each element
 * carries user_ts, asset_code and a reading object. Full batches of
 * APPEND_BATCH_SIZE rows are written with a multi-value INSERT, the
 * remainder one row at a time. Returns the number of rows appended
 * or -1 on failure.
 */
int Connection::appendReadings(const char *readings)
{
// Default template parameter uses UTF8 and MemoryPoolAllocator.
	Document doc;
	int row = 0;
	bool add_row = false;

	// Variables related to the SQLite insert using prepared command
	const char *user_ts;
	const char *asset_code;
	string reading;
	sqlite3_stmt *stmt, *batch_stmt;
	int sqlite3_resut;
	string now;

	// Retry mechanism
	int retries = 0;
	int sleep_time_ms = 0;

	ostringstream threadId;
	threadId << std::this_thread::get_id();

#if INSTRUMENT
	Logger::getLogger()->warn("appendReadings start thread :%s:", threadId.str().c_str());
	struct timeval start, t1, t2, t3, t4, t5;
#endif

#if INSTRUMENT
	gettimeofday(&start, NULL);
#endif

	// Large payloads are parsed in-situ on a private copy to avoid rapidjson copying strings
	int len = strlen(readings) + 1;
	char *readingsCopy = NULL;
	ParseResult ok;
#if INSITU_THRESHOLD
	if (len > INSITU_THRESHOLD)
	{
		// NOTE(review): malloc result is not checked before memcpy
		readingsCopy = (char *)malloc(len);
		memcpy(readingsCopy, readings, len);
		ok = doc.ParseInsitu(readingsCopy);
	}
	else
#endif
	{
		ok = doc.Parse(readings);
	}
	if (!ok)
	{
		raiseError("appendReadings", GetParseError_En(doc.GetParseError()));
		if (readingsCopy)
		{
			free(readingsCopy);
		}
		return -1;
	}

	if (!doc.HasMember("readings"))
	{
		raiseError("appendReadings", "Payload is missing a readings array");
		if (readingsCopy)
		{
			free(readingsCopy);
		}
		return -1;
	}
	Value &readingsValue = doc["readings"];
	if (!readingsValue.IsArray())
	{
		raiseError("appendReadings", "Payload is missing the readings array");
		if (readingsCopy)
		{
			free(readingsCopy);
		}
		return -1;
	}

	// Single row insert; the batch variant appends APPEND_BATCH_SIZE value tuples.
	// NOTE(review): neither prepare result is checked.
	const char *sql_cmd="INSERT INTO " READINGS_DB_NAME_BASE ".readings ( user_ts, asset_code, reading ) VALUES (?,?,?)";
	string cmd = sql_cmd;
	for (int i = 0; i < APPEND_BATCH_SIZE - 1; i++)
	{
		cmd.append(", (?,?,?)");
	}
	sqlite3_prepare_v2(dbHandle, sql_cmd, strlen(sql_cmd), &stmt, NULL);
	sqlite3_prepare_v2(dbHandle, cmd.c_str(), cmd.length(), &batch_stmt, NULL);

	{
	// NOTE(review): the early 'return -1' paths inside this scope leave the
	// function without executing the matching fetch_sub(1), so the
	// m_writeAccessOngoing counter leaks on every error exit.
	m_writeAccessOngoing.fetch_add(1);
	//unique_lock<mutex> lck(db_mutex);
	sqlite3_exec(dbHandle, "BEGIN TRANSACTION", NULL, NULL, NULL);

#if INSTRUMENT
	gettimeofday(&t1, NULL);
#endif

	Value::ConstValueIterator itr = readingsValue.Begin();
	SizeType nReadings = readingsValue.Size();
	unsigned int nBatches = nReadings / APPEND_BATCH_SIZE;
	Logger::getLogger()->debug("Write %d readings in %d batches of %d", nReadings, nBatches, APPEND_BATCH_SIZE);
	for (int batch = 0; batch < nBatches; batch++)
	{
		int varNo = 1;
		for (int readingNo = 0; readingNo < APPEND_BATCH_SIZE; readingNo++)
		{
			if (!itr->IsObject())
			{
				char err[132];
				snprintf(err, sizeof(err), "Each reading in the readings array must be an object. Reading %d of batch %d", readingNo, batch);
				raiseError("appendReadings",err);
				sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION;", NULL, NULL, NULL);
				if (readingsCopy)
				{
					free(readingsCopy);
				}
				return -1;
			}
			add_row = true;

			// Handles - user_ts
			char formatted_date[LEN_BUFFER_DATE] = {0};
			user_ts = (*itr)["user_ts"].GetString();
			if (strcmp(user_ts, "now()") == 0)
			{
				getNow(now);
				user_ts = now.c_str();
			}
			else
			{
				if (! formatDate(formatted_date, sizeof(formatted_date), user_ts) )
				{
					raiseError("appendReadings", "Invalid date |%s|", user_ts);
					add_row = false;
				}
				else
				{
					user_ts = formatted_date;
				}
			}

			if (add_row)
			{
				// Handles - asset_code
				asset_code = (*itr)["asset_code"].GetString();
				if (strlen(asset_code) == 0)
				{
					// NOTE(review): skipping a reading here leaves the trailing
					// parameters of the batch statement unbound (NULL row) and
					// shifts the remaining bindings by one tuple.
					Logger::getLogger()->warn("Sqlitelb appendReadings - empty asset code value, row is ignored");
					itr++;
					continue;
				}

				// Handles - reading
				StringBuffer buffer;
				Writer<StringBuffer> writer(buffer);
				(*itr)["reading"].Accept(writer);
				reading = escape(buffer.GetString());

				// NOTE(review): guard tests 'stmt' but the binds target 'batch_stmt'
				if (stmt != NULL)
				{
					sqlite3_bind_text(batch_stmt, varNo++, user_ts, -1, SQLITE_TRANSIENT);
					sqlite3_bind_text(batch_stmt, varNo++, asset_code, -1, SQLITE_TRANSIENT);
					sqlite3_bind_text(batch_stmt, varNo++, reading.c_str(), -1, SQLITE_TRANSIENT);
				}
			}
			itr++;
			if (itr == readingsValue.End())
				break;
		}

		retries =0;
		sleep_time_ms = 0;

		// Retry mechanism in case SQLlite DB is locked
		do {
			// Insert the row using a lock to ensure one insert at time
			{
				sqlite3_resut = sqlite3_step(batch_stmt);
			}
			if (sqlite3_resut == SQLITE_LOCKED )
			{
				sleep_time_ms = PREP_CMD_RETRY_BASE + ((retries / 2 ) * (random() % PREP_CMD_RETRY_BACKOFF));
				retries++;

				Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:" ,row ,retries ,sleep_time_ms);
				std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
			}
			if (sqlite3_resut == SQLITE_BUSY)
			{
				ostringstream threadId;
				threadId << std::this_thread::get_id();

				sleep_time_ms = PREP_CMD_RETRY_BASE + ((retries / 2 ) * (random() % PREP_CMD_RETRY_BACKOFF));
				retries++;

				Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms :%d:", threadId.str().c_str() ,row, retries, sleep_time_ms);
				std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
			}
		} while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY));

		if (sqlite3_resut == SQLITE_DONE)
		{
			// NOTE(review): counts the whole batch even if some rows were skipped above
			row += APPEND_BATCH_SIZE;

			sqlite3_clear_bindings(batch_stmt);
			sqlite3_reset(batch_stmt);
		}
		else
		{
			raiseError("appendReadings","Inserting a row into SQLite using a prepared command - asset_code :%s: error :%s: reading :%s: ", asset_code, sqlite3_errmsg(dbHandle), reading.c_str());
			sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
			if (readingsCopy)
			{
				free(readingsCopy);
			}
			return -1;
		}
	}
	Logger::getLogger()->debug("Now do the remaining readings");
	// Do individual inserts for the remainder of the readings
	while (itr != readingsValue.End())
	{
		if (!itr->IsObject())
		{
			raiseError("appendReadings","Each reading in the readings array must be an object");
			sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION;", NULL, NULL, NULL);
			if (readingsCopy)
			{
				free(readingsCopy);
			}
			return -1;
		}
		add_row = true;

		// Handles - user_ts
		char formatted_date[LEN_BUFFER_DATE] = {0};
		user_ts = (*itr)["user_ts"].GetString();
		if (strcmp(user_ts, "now()") == 0)
		{
			getNow(now);
			user_ts = now.c_str();
		}
		else
		{
			if (! formatDate(formatted_date, sizeof(formatted_date), user_ts) )
			{
				raiseError("appendReadings", "Invalid date |%s|", user_ts);
				add_row = false;
			}
			else
			{
				user_ts = formatted_date;
			}
		}

		if (add_row)
		{
			// Handles - asset_code
			asset_code = (*itr)["asset_code"].GetString();
			if (strlen(asset_code) == 0)
			{
				Logger::getLogger()->warn("Sqlitelb appendReadings - empty asset code value, row is ignored");
				itr++;
				continue;
			}

			// Handles - reading
			StringBuffer buffer;
			Writer<StringBuffer> writer(buffer);
			(*itr)["reading"].Accept(writer);
			reading = escape(buffer.GetString());

			if(stmt != NULL)
			{
				sqlite3_bind_text(stmt, 1, user_ts ,-1, SQLITE_TRANSIENT);
				sqlite3_bind_text(stmt, 2, asset_code ,-1, SQLITE_TRANSIENT);
				sqlite3_bind_text(stmt, 3, reading.c_str(), -1, SQLITE_TRANSIENT);

				retries =0;
				sleep_time_ms = 0;

				// Retry mechanism in case SQLlite DB is locked
				do {
					// Insert the row using a lock to ensure one insert at time
					{
						sqlite3_resut = sqlite3_step(stmt);
					}
					if (sqlite3_resut == SQLITE_LOCKED )
					{
						sleep_time_ms = PREP_CMD_RETRY_BASE + ((retries / 2 ) * (random() % PREP_CMD_RETRY_BACKOFF));
						retries++;

						Logger::getLogger()->info("SQLITE_LOCKED - record :%d: - retry number :%d: sleep time ms :%d:" ,row ,retries ,sleep_time_ms);
						std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
					}
					if (sqlite3_resut == SQLITE_BUSY)
					{
						ostringstream threadId;
						threadId << std::this_thread::get_id();

						sleep_time_ms = PREP_CMD_RETRY_BASE + ((retries / 2 ) * (random() % PREP_CMD_RETRY_BACKOFF));
						retries++;

						Logger::getLogger()->info("SQLITE_BUSY - thread :%s: - record :%d: - retry number :%d: sleep time ms :%d:", threadId.str().c_str() ,row, retries, sleep_time_ms);
						std::this_thread::sleep_for(std::chrono::milliseconds(sleep_time_ms));
					}
				} while (retries < PREP_CMD_MAX_RETRIES && (sqlite3_resut == SQLITE_LOCKED || sqlite3_resut == SQLITE_BUSY));

				if (sqlite3_resut == SQLITE_DONE)
				{
					row++;

					sqlite3_clear_bindings(stmt);
					sqlite3_reset(stmt);
				}
				else
				{
					raiseError("appendReadings","Inserting a row into SQLite using a prepared command - asset_code :%s: error :%s: reading :%s: ", asset_code, sqlite3_errmsg(dbHandle), reading.c_str());
					sqlite3_exec(dbHandle, "ROLLBACK TRANSACTION", NULL, NULL, NULL);
					if (readingsCopy)
					{
						free(readingsCopy);
					}
					return -1;
				}
			}
		}
		itr++;
	}

	sqlite3_resut = sqlite3_exec(dbHandle, "END TRANSACTION", NULL, NULL, NULL);
	if (sqlite3_resut != SQLITE_OK)
	{
		raiseError("appendReadings", "Executing the commit of the transaction :%s:", sqlite3_errmsg(dbHandle));
		row = -1;
	}
	m_writeAccessOngoing.fetch_sub(1);
	//db_cv.notify_all();
	}

#if INSTRUMENT
	gettimeofday(&t2, NULL);
#endif

	if(stmt != NULL)
	{
		if (sqlite3_finalize(stmt) != SQLITE_OK)
		{
			raiseError("appendReadings","freeing SQLite in memory structure - error :%s:", sqlite3_errmsg(dbHandle));
		}
	}
	if(batch_stmt != NULL)
	{
		if (sqlite3_finalize(batch_stmt) != SQLITE_OK)
		{
			raiseError("appendReadings","freeing SQLite in memory batch structure - error :%s:", sqlite3_errmsg(dbHandle));
		}
	}
	if (readingsCopy)
	{
		free(readingsCopy);
	}

#if INSTRUMENT
	gettimeofday(&t3, NULL);
#endif

#if INSTRUMENT
	struct timeval tm;
	double timeT1, timeT2, timeT3;

	timersub(&t1, &start, &tm);
	timeT1 = tm.tv_sec + ((double)tm.tv_usec / 1000000);

	timersub(&t2, &t1, &tm);
	timeT2 = tm.tv_sec + ((double)tm.tv_usec / 1000000);

	timersub(&t3, &t2, &tm);
	timeT3 = tm.tv_sec + ((double)tm.tv_usec / 1000000);

	Logger::getLogger()->warn("appendReadings end thread :%s: buffer :%10lu: count :%5d: JSON :%6.3f: inserts :%6.3f: finalize :%6.3f:", threadId.str().c_str(), strlen(readings), row, timeT1, timeT2, timeT3 );
#endif

	return row;
}

/**
 * Fetch a block of readings from the reading table
 * It might not work with SQLite 3
 *
 * Fetch, used by the north side, returns timestamp in UTC.
* * NOTE : it expects to handle a date having a fixed format * with milliseconds, microseconds and timezone expressed, * like for example : * * 2019-01-11 15:45:01.123456+01:00 */ bool Connection::fetchReadings(unsigned long id, unsigned int blksize, std::string& resultSet) { char sqlbuffer[512]; char *zErrMsg = NULL; int rc; int retrieve; // SQL command to extract the data from the readings.readings const char *sql_cmd = R"( SELECT id, asset_code, reading, strftime('%%Y-%%m-%%d %%H:%%M:%%S', user_ts, 'utc') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts, strftime('%%Y-%%m-%%d %%H:%%M:%%f', ts, 'utc') AS ts FROM )" READINGS_DB_NAME_BASE R"(.readings WHERE id >= %lu ORDER BY id ASC LIMIT %u; )"; /* * This query assumes datetime values are in 'localtime' */ snprintf(sqlbuffer, sizeof(sqlbuffer), sql_cmd, id, blksize); logSQL("ReadingsFetch", sqlbuffer); sqlite3_stmt *stmt; // Prepare the SQL statement and get the result set if (sqlite3_prepare_v2(dbHandle, sqlbuffer, -1, &stmt, NULL) != SQLITE_OK) { raiseError("retrieve", sqlite3_errmsg(dbHandle)); // Failure return false; } else { // Call result set mapping rc = mapResultSet(stmt, resultSet); // Delete result set sqlite3_finalize(stmt); // Check result set errors if (rc != SQLITE_DONE) { raiseError("retrieve", sqlite3_errmsg(dbHandle)); // Failure return false; } else { // Success return true; } } } /** * Perform a query against the readings table * * retrieveReadings, used by the API, returns timestamp in utc unless * otherwise requested. * */ bool Connection::retrieveReadings(const string& condition, string& resultSet) { // Default template parameter uses UTF8 and MemoryPoolAllocator. 
	Document document;
	SQLBuffer	sql;
	// Extra constraints to add to where clause
	SQLBuffer	jsonConstraints;
	bool		isAggregate = false;
	// Timezone applied to user_ts/ts in the returned rows; may be overridden by the payload
	const char	*timezone = "utc";
	vector<string>  asset_codes;

	try {
		if (dbHandle == NULL)
		{
			raiseError("retrieve", "No SQLite 3 db connection available");
			return false;
		}

		if (condition.empty())
		{
			// No condition: return every reading, timestamps converted as per the fixed query
			const char *sql_cmd = R"(
					SELECT
						id,
						asset_code,
						reading,
						strftime(')" F_DATEH24_SEC R"(', user_ts, 'utc')  ||
						substr(user_ts, instr(user_ts, '.'), 7) AS user_ts,
						strftime(')" F_DATEH24_MS R"(', ts, 'localtime') AS ts
					FROM  )" READINGS_DB_NAME_BASE R"(.readings)";

			sql.append(sql_cmd);
		}
		else
		{
			if (document.Parse(condition.c_str()).HasParseError())
			{
				raiseError("retrieve", "Failed to parse JSON payload");
				return false;
			}

			if (document.HasMember("timezone") && document["timezone"].IsString())
			{
				timezone = document["timezone"].GetString();
			}

			// timebucket aggregate all datapoints
			if (aggregateAll(document))
			{
				return aggregateQuery(document, resultSet);
			}

			if (document.HasMember("aggregate"))
			{
				isAggregate = true;
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				if (!jsonAggregates(document, document["aggregate"], sql, jsonConstraints, true))
				{
					return false;
				}
				sql.append(" FROM  " READINGS_DB_NAME_BASE ".");
			}
			else if (document.HasMember("return"))
			{
				// Explicit column list requested by the caller
				int col = 0;
				Value& columns = document["return"];
				if (! columns.IsArray())
				{
					raiseError("retrieve", "The property return must be an array");
					return false;
				}
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				for (Value::ConstValueIterator itr = columns.Begin(); itr != columns.End(); ++itr)
				{
					if (col)
						sql.append(", ");
					if (!itr->IsObject())	// Simple column name
					{
						if (strcmp(itr->GetString() ,"user_ts") == 0)
						{
							// Display without TZ expression and microseconds also
							sql.append(" strftime('" F_DATEH24_SEC "', user_ts, '");
							sql.append(timezone);
							sql.append("') ");
							sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
							sql.append(" as  user_ts ");
						}
						else if (strcmp(itr->GetString() ,"ts") == 0)
						{
							// Display without TZ expression and microseconds also
							sql.append(" strftime('" F_DATEH24_MS "', ts, '");
							sql.append(timezone);
							sql.append("') ");
							sql.append(" as ts ");
						}
						else
						{
							sql.append(itr->GetString());
						}
					}
					else
					{
						// Column descriptor object: column/format/timezone or json
						if (itr->HasMember("column"))
						{
							if (! (*itr)["column"].IsString())
							{
								raiseError("retrieve",
									   "column must be a string");
								return false;
							}
							if (itr->HasMember("format"))
							{
								if (! (*itr)["format"].IsString())
								{
									raiseError("retrieve",
										   "format must be a string");
									return false;
								}

								// SQLite 3 date format.
								string new_format;
								applyColumnDateFormatLocaltime((*itr)["format"].GetString(),
										      (*itr)["column"].GetString(),
										      new_format, true);
								// Add the formatted column or use it as is
								sql.append(new_format);
							}
							else if (itr->HasMember("timezone"))
							{
								if (! (*itr)["timezone"].IsString())
								{
									raiseError("retrieve",
										   "timezone must be a string");
									return false;
								}
								// SQLite3 doesnt support time zone formatting
								const char *tz = (*itr)["timezone"].GetString();

								if (strncasecmp(tz, "utc", 3) == 0)
								{
									if (strcmp((*itr)["column"].GetString() ,"user_ts") == 0)
									{
										// Extract milliseconds and microseconds for the user_ts fields
										sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'utc') ");
										sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
										if (! itr->HasMember("alias"))
										{
											sql.append(" AS ");
											sql.append((*itr)["column"].GetString());
										}
									}
									else
									{
										sql.append("strftime('" F_DATEH24_MS "', ");
										sql.append((*itr)["column"].GetString());
										sql.append(", 'utc')");
										if (! itr->HasMember("alias"))
										{
											sql.append(" AS ");
											sql.append((*itr)["column"].GetString());
										}
									}
								}
								else if (strncasecmp(tz, "localtime", 9) == 0)
								{
									if (strcmp((*itr)["column"].GetString() ,"user_ts") == 0)
									{
										// Extract milliseconds and microseconds for the user_ts fields
										sql.append("strftime('" F_DATEH24_SEC "', user_ts, 'localtime') ");
										sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
										if (! itr->HasMember("alias"))
										{
											sql.append(" AS ");
											sql.append((*itr)["column"].GetString());
										}
									}
									else
									{
										sql.append("strftime('" F_DATEH24_MS "', ");
										sql.append((*itr)["column"].GetString());
										sql.append(", 'localtime')");
										if (! itr->HasMember("alias"))
										{
											sql.append(" AS ");
											sql.append((*itr)["column"].GetString());
										}
									}
								}
								else
								{
									raiseError("retrieve",
										   "SQLite3 plugin does not support timezones in queries");
									return false;
								}
							}
							else
							{
								// No format/timezone given: use the payload-level timezone
								if (strcmp((*itr)["column"].GetString() ,"user_ts") == 0)
								{
									// Extract milliseconds and microseconds for the user_ts fields
									sql.append("strftime('" F_DATEH24_SEC "', user_ts, '");
									sql.append(timezone);
									sql.append("') ");
									sql.append(" || substr(user_ts, instr(user_ts, '.'), 7) ");
									if (! itr->HasMember("alias"))
									{
										sql.append(" AS ");
										sql.append((*itr)["column"].GetString());
									}
								}
								else
								{
									sql.append("strftime('" F_DATEH24_MS "', ");
									sql.append((*itr)["column"].GetString());
									sql.append(", '");
									sql.append(timezone);
									sql.append("')");
									if (! itr->HasMember("alias"))
									{
										sql.append(" AS ");
										sql.append((*itr)["column"].GetString());
									}
								}
							}
							sql.append(' ');
						}
						else if (itr->HasMember("json"))
						{
							const Value& json = (*itr)["json"];
							if (! returnJson(json, sql, jsonConstraints))
								return false;
						}
						else
						{
							raiseError("retrieve",
								   "return object must have either a column or json property");
							return false;
						}

						if (itr->HasMember("alias"))
						{
							sql.append(" AS \"");
							sql.append((*itr)["alias"].GetString());
							sql.append('"');
						}
					}
					col++;
				}
				sql.append(" FROM  " READINGS_DB_NAME_BASE ".");
			}
			else
			{
				// Default column set with timestamps rendered in the requested timezone
				sql.append("SELECT ");
				if (document.HasMember("modifier"))
				{
					sql.append(document["modifier"].GetString());
					sql.append(' ');
				}
				sql.append("id, asset_code, reading, strftime('" F_DATEH24_SEC "', user_ts, '");
				sql.append(timezone);
				sql.append("') || substr(user_ts, instr(user_ts, '.'), 7) AS user_ts,");
				sql.append("strftime('" F_DATEH24_MS "', ts, '");
				sql.append(timezone);
				sql.append("') AS ts FROM " READINGS_DB_NAME_BASE ".");
			}
			sql.append("readings");
			if (document.HasMember("where"))
			{
				sql.append(" WHERE ");
				// NOTE(review): this inner HasMember("where") test is redundant,
				// the enclosing branch already established it; the else is dead code
				if (document.HasMember("where"))
				{
					if (!jsonWhereClause(document["where"], sql, asset_codes))
					{
						return false;
					}
				}
				else
				{
					raiseError("retrieve",
						   "JSON does not contain where clause");
					return false;
				}
				if (! jsonConstraints.isEmpty())
				{
					sql.append(" AND ");
					const char *jsonBuf = jsonConstraints.coalesce();
					sql.append(jsonBuf);
					delete[] jsonBuf;
				}
			}
			else if (isAggregate)
			{
				/*
				 * Performance improvement: force sqlite to use an index
				 * if we are doing an aggregate and have no where clause.
				 */
				sql.append(" WHERE asset_code = asset_code");
			}
			if (!jsonModifiers(document, sql, true))
			{
				return false;
			}
		}
		sql.append(';');

		const char *query = sql.coalesce();
		char *zErrMsg = NULL;
		int rc;
		sqlite3_stmt *stmt;

		logSQL("ReadingsRetrieve", query);

		// Prepare the SQL statement and get the result set
		rc = sqlite3_prepare_v2(dbHandle, query, -1, &stmt, NULL);

		// Release memory for 'query' var
		delete[] query;

		if (rc != SQLITE_OK)
		{
			raiseError("retrieve", sqlite3_errmsg(dbHandle));
			return false;
		}

		// Call result set mapping
		rc = mapResultSet(stmt, resultSet);

		// Delete result set
		sqlite3_finalize(stmt);

		// Check result set mapping errors
		if (rc != SQLITE_DONE)
		{
			raiseError("retrieve", sqlite3_errmsg(dbHandle));
			// Failure
			return false;
		}
		// Success
		return true;
	} catch (exception e) {
		raiseError("retrieve", "Internal error: %s", e.what());
		return false;
	}
}

/**
 * Purge readings from the reading table
 *
 * Removes rows older than 'age' hours, optionally retaining rows not yet
 * sent north (STORAGE_PURGE_RETAIN_* flags). Deletion is performed in
 * adaptive blocks so other writers are not starved. Returns the number of
 * deleted rows and fills 'result' with a JSON summary.
 */
unsigned int  Connection::purgeReadings(unsigned long age,
					unsigned int flags,
					unsigned long sent,
					std::string& result)
{
long unsentPurged = 0;
long unsentRetained = 0;
long numReadings = 0;
unsigned long rowidLimit = 0, minrowidLimit = 0, maxrowidLimit = 0, rowidMin;
struct timeval startTv, endTv;
int blocks = 0;
bool flag_retain;

	Logger *logger = Logger::getLogger();

	flag_retain = false;

	if ( (flags & STORAGE_PURGE_RETAIN_ANY) || (flags & STORAGE_PURGE_RETAIN_ALL) )
	{
		flag_retain = true;
	}
	Logger::getLogger()->debug("%s - flags :%X: flag_retain :%d: sent :%ld:", __FUNCTION__, flags, flag_retain, sent);

	// Prepare empty result
	result = "{ \"removed\" : 0, ";
	result += " \"unsentPurged\" : 0, ";
	result += " \"unsentRetained\" : 0, ";
	result += " \"readings\" : 0, ";
	result += " \"method\" : \"time\", ";
	result += " \"duration\" : 0 }";

	logger->info("Purge starting...");
	gettimeofday(&startTv, NULL);
	/*
	 * We fetch the current rowid and limit the purge process to work on just
	 * those rows present in the database when the purge process started.
	 * This prevents us looping in the purge process if new readings become
	 * eligible for purging at a rate that is faster than we can purge them.
	 */
	{
		char *zErrMsg = NULL;
		int rc;
		rc = SQLexec(dbHandle, "readings",
		     "select max(rowid) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";",
			 rowidCallback,
			 &rowidLimit,
			 &zErrMsg);

		if (rc != SQLITE_OK)
		{
			raiseError("purge - phase 0, fetching rowid limit ", zErrMsg);
			sqlite3_free(zErrMsg);
			return 0;
		}
		maxrowidLimit = rowidLimit;
	}

	{
		char *zErrMsg = NULL;
		int rc;
		rc = SQLexec(dbHandle, "readings",
		     "select min(rowid) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";",
			 rowidCallback,
			 &minrowidLimit,
			 &zErrMsg);

		if (rc != SQLITE_OK)
		{
			raiseError("purge - phaase 0, fetching minrowid limit ", zErrMsg);
			sqlite3_free(zErrMsg);
			return 0;
		}
	}

	if (age == 0)
	{
		/*
		 * An age of 0 means remove the oldest hours data.
		 * So set age based on the data we have and continue.
		 */
		SQLBuffer oldest;
		oldest.append("SELECT (strftime('%s','now', 'utc') - strftime('%s', MIN(user_ts)))/360 FROM " READINGS_DB_NAME_BASE "." READINGS_TABLE " where rowid <= ");
		oldest.append(rowidLimit);
		oldest.append(';');
		const char *query = oldest.coalesce();
		char *zErrMsg = NULL;
		int rc;
		int purge_readings = 0;

		// Exec query and get result in 'purge_readings' via 'selectCallback'
		rc = SQLexec(dbHandle, "readings",
			     query,
			     selectCallback,
			     &purge_readings,
			     &zErrMsg);
		// Release memory for 'query' var
		delete[] query;

		if (rc == SQLITE_OK)
		{
			age = purge_readings;
		}
		else
		{
			raiseError("purge - phase 1", zErrMsg);
			sqlite3_free(zErrMsg);
			return 0;
		}
	}

	{
		/*
		 * Refine rowid limit to just those rows older than age hours.
		 * Binary search between the oldest and newest candidate rowids.
		 */
		char *zErrMsg = NULL;
		int rc;
		unsigned long l = minrowidLimit;
		unsigned long r;
		if (flag_retain) {

			r = min(sent, rowidLimit);
		} else {
			r = rowidLimit;
		}

		r = max(r, l);
		logger->debug ("%s:%d: l=%u, r=%u, sent=%u, rowidLimit=%u, minrowidLimit=%u, flags=%u", __FUNCTION__, __LINE__, l, r, sent, rowidLimit, minrowidLimit, flags);

		if (l == r)
		{
 			logger->info("No data to purge: min_id == max_id == %u", minrowidLimit);
			return 0;
		}

		unsigned long m=l;

		sqlite3_stmt *idStmt;
		bool isMinId = false;
		while (l <= r)
		{
			unsigned long midRowId = 0;
			unsigned long prev_m = m;
			m = l + (r - l) / 2;
			if (prev_m == m) break;

			// e.g. select id from readings where rowid = 219867307 AND user_ts < datetime('now' , '-24 hours', 'utc');
			// NOTE(review): the '?' for the age is placed inside the SQL string
			// literal '-? hours', so it is not a real parameter; the bind of
			// index 2 below has no placeholder to bind to. Also idStmt is
			// re-prepared on every iteration but finalized at most once after
			// the loop, leaking one statement per extra iteration.
			SQLBuffer sqlBuffer;
			sqlBuffer.append("select id from " READINGS_DB_NAME_BASE "." READINGS_TABLE " where rowid = ?");
			sqlBuffer.append(" AND user_ts < datetime('now' , '-?");
			sqlBuffer.append(" hours');");
			const char *query = sqlBuffer.coalesce();

			rc = sqlite3_prepare_v2(dbHandle, query, -1, &idStmt, NULL);

			sqlite3_bind_int(idStmt, 1,(unsigned long) m);
			sqlite3_bind_int(idStmt, 2,(unsigned long) age);

			if (SQLstep(idStmt) == SQLITE_ROW)
			{
				midRowId = sqlite3_column_int(idStmt, 0);
				isMinId = true;
			}
			delete[] query;

			sqlite3_clear_bindings(idStmt);
			sqlite3_reset(idStmt);

			if (rc == SQLITE_ERROR)
			{
				raiseError("purge - phase 1, fetching midRowId ", sqlite3_errmsg(dbHandle));
				return 0;
			}

			if (midRowId == 0) // mid row doesn't satisfy given condition for user_ts, so discard right/later half and look in left/earlier half
			{
				// search in earlier/left half
				r = m - 1;

				// The m position should be skipped as midRowId is 0
				m = r;
			}
			else //if (l != m)
			{
				// search in later/right half
				l = m + 1;
			}
		}
		if(isMinId)
		{
			sqlite3_finalize(idStmt);
		}

		rowidLimit = m;

		{ // Fix the value of rowidLimit

			Logger::getLogger()->debug("%s - s1 rowidLimit :%lu: minrowidLimit :%lu: maxrowidLimit :%lu:", __FUNCTION__, rowidLimit, minrowidLimit, maxrowidLimit);

			SQLBuffer sqlBuffer;
			sqlBuffer.append("select max(id) from " READINGS_DB_NAME_BASE "." READINGS_TABLE " where rowid <= ");
			sqlBuffer.append(rowidLimit);
			sqlBuffer.append(" AND user_ts < datetime('now' , '-");
			sqlBuffer.append(age);
			sqlBuffer.append(" hours');");
			const char *query = sqlBuffer.coalesce();

			rc = SQLexec(dbHandle, "readings",
				 query,
				 rowidCallback,
				 &rowidLimit,
				 &zErrMsg);
			delete[] query;

			if (rc != SQLITE_OK)
			{
				raiseError("purge - phase 1, fetching rowidLimit ", zErrMsg);
				sqlite3_free(zErrMsg);
				return 0;
			}
			Logger::getLogger()->debug("%s - s2 rowidLimit :%lu: minrowidLimit :%lu: maxrowidLimit :%lu:", __FUNCTION__, rowidLimit, minrowidLimit, maxrowidLimit);
		}

		if (minrowidLimit == rowidLimit)
		{
			logger->info("No data to purge");
			return 0;
		}

		rowidMin = minrowidLimit;
	}
	//logger->info("Purge collecting unsent row count");
	if ( ! flag_retain )
	{
		char *zErrMsg = NULL;
		int rc;
		int lastPurgedId;
		SQLBuffer idBuffer;
		idBuffer.append("select id from " READINGS_DB_NAME_BASE "." READINGS_TABLE " where rowid = ");
		idBuffer.append(rowidLimit);
		idBuffer.append(';');
		const char *idQuery = idBuffer.coalesce();
		rc = SQLexec(dbHandle, "readings",
		     idQuery,
			 rowidCallback,
			 &lastPurgedId,
			 &zErrMsg);

		// Release memory for 'idQuery' var
		delete[] idQuery;

		if (rc != SQLITE_OK)
		{
 			raiseError("purge - phase 0, fetching rowid limit ", zErrMsg);
			sqlite3_free(zErrMsg);
			return 0;
		}

		if (sent != 0 && lastPurgedId > sent)	// Unsent readings will be purged
		{
			// Get number of unsent rows we are about to remove
			int unsent = rowidLimit - sent;
			unsentPurged = unsent;
		}
	}
#if 0
	if (m_writeAccessOngoing)
	{
		while (m_writeAccessOngoing)
		{
			logger->warn("Yielding for another write access");
			std::this_thread::yield();
		}
	}
#endif

	unsigned int deletedRows = 0;
	unsigned int rowsAffected, totTime=0, prevBlocks=0, prevTotTime=0;
	logger->info("Purge about to delete readings # %ld to %ld", rowidMin, rowidLimit);
	sqlite3_stmt *stmt;
	bool rowsAvailableToPurge = false;
	// Delete in blocks of purgeBlockSize rows; the block size is adapted below
	while (rowidMin < rowidLimit)
	{
		blocks++;
		rowidMin += purgeBlockSize;
		if (rowidMin > rowidLimit)
		{
			rowidMin = rowidLimit;
		}

		int rc;
		{
			SQLBuffer sql;
			sql.append("DELETE FROM " READINGS_DB_NAME_BASE "." READINGS_TABLE " WHERE rowid <= ? ;");
			const char *query = sql.coalesce();

			rc = sqlite3_prepare_v2(dbHandle, query, strlen(query), &stmt, NULL);
			if (rc != SQLITE_OK)
			{
				raiseError("purgeReadings", sqlite3_errmsg(dbHandle));
				Logger::getLogger()->error("SQL statement: %s", query);
				return 0;
			}
			delete[] query;
		}
		sqlite3_bind_int(stmt, 1,(unsigned long) rowidMin);
		rowsAvailableToPurge = true;
		{
			//unique_lock<mutex> lck(db_mutex);
//			if (m_writeAccessOngoing) db_cv.wait(lck);

			START_TIME;
			// Exec DELETE query: no callback, no resultset
			rc = SQLstep(stmt);
			END_TIME;
			logSQL("ReadingsPurge", sqlite3_expanded_sql(stmt));
			// NOTE(review): rowsAffected is logged here before its first
			// assignment (set only after the SQLITE_DONE check below)
			logger->debug("%s - DELETE - query :%s: rowsAffected :%ld:",  __FUNCTION__, sqlite3_expanded_sql(stmt) ,rowsAffected);

			totTime += usecs;

			if(usecs>150000)
			{
				std::this_thread::yield();	// Give other threads a chance to run
			}
		}
		if (rc == SQLITE_DONE)
		{
			sqlite3_clear_bindings(stmt);
			sqlite3_reset(stmt);
		}
		else
		{
			raiseError("purge - phase 3", sqlite3_errmsg(dbHandle));
			return 0;
		}

		// Get db changes
		rowsAffected = sqlite3_changes(dbHandle);
		deletedRows += rowsAffected;
		logger->debug("Purge delete block #%d with %d readings", blocks, rowsAffected);

		if(blocks % RECALC_PURGE_BLOCK_SIZE_NUM_BLOCKS == 0)
		{
			int prevAvg = prevTotTime/(prevBlocks?prevBlocks:1);
			int currAvg = (totTime-prevTotTime)/(blocks-prevBlocks);
			int avg = ((prevAvg?prevAvg:currAvg)*5 + currAvg*5) / 10; // 50% weightage for long term avg and 50% weightage for current avg
			prevBlocks = blocks;
			prevTotTime = totTime;
			int deviation = abs(avg - TARGET_PURGE_BLOCK_DEL_TIME);
			logger->debug("blocks=%d, totTime=%d usecs, prevAvg=%d usecs, currAvg=%d usecs, avg=%d usecs, TARGET_PURGE_BLOCK_DEL_TIME=%d usecs, deviation=%d usecs", blocks, totTime, prevAvg, currAvg, avg, TARGET_PURGE_BLOCK_DEL_TIME, deviation);
			if (deviation > TARGET_PURGE_BLOCK_DEL_TIME/10)
			{
				float ratio =
(float)TARGET_PURGE_BLOCK_DEL_TIME / (float)avg; if (ratio > 2.0) ratio = 2.0; if (ratio < 0.5) ratio = 0.5; purgeBlockSize = (float)purgeBlockSize * ratio; purgeBlockSize = purgeBlockSize / PURGE_BLOCK_SZ_GRANULARITY * PURGE_BLOCK_SZ_GRANULARITY; if (purgeBlockSize < MIN_PURGE_DELETE_BLOCK_SIZE) purgeBlockSize = MIN_PURGE_DELETE_BLOCK_SIZE; if (purgeBlockSize > MAX_PURGE_DELETE_BLOCK_SIZE) purgeBlockSize = MAX_PURGE_DELETE_BLOCK_SIZE; logger->debug("Changed purgeBlockSize to %d", purgeBlockSize); } std::this_thread::yield(); // Give other threads a chance to run } //Logger::getLogger()->debug("Purge delete block #%d with %d readings", blocks, rowsAffected); } while (rowidMin < rowidLimit); if (rowsAvailableToPurge) { sqlite3_finalize(stmt); } unsentRetained = maxrowidLimit - rowidLimit; numReadings = maxrowidLimit +1 - minrowidLimit - deletedRows; if (sent == 0) // Special case when not north process is used { unsentPurged = deletedRows; } gettimeofday(&endTv, NULL); unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; ostringstream convert; convert << "{ \"removed\" : " << deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; convert << " \"readings\" : " << numReadings << ", "; convert << " \"method\" : \"time\", "; convert << " \"duration\" : " << duration << " }"; result = convert.str(); //logger->debug("Purge result=%s", result.c_str()); logger->info("Purge process complete in %d blocks in %lduS", blocks, duration); Logger::getLogger()->debug("%s - age :%lu: flag_retain :%x: sent :%lu: result :%s:", __FUNCTION__, age, flags, flag_retain, result.c_str() ); return deletedRows; } /** * Purge readings from the reading table */ unsigned int Connection::purgeReadingsByRows(unsigned long rows, unsigned int flags, unsigned long sent, std::string& result) { unsigned long deletedRows = 0, unsentPurged = 0, unsentRetained = 0, 
numReadings = 0; unsigned long limit = 0; unsigned long rowcount, minId, maxId; unsigned long rowsAffected; unsigned long deletePoint; bool flag_retain; struct timeval startTv, endTv; Logger *logger = Logger::getLogger(); gettimeofday(&startTv, NULL); flag_retain = false; if ( (flags & STORAGE_PURGE_RETAIN_ANY) || (flags & STORAGE_PURGE_RETAIN_ALL) ) { flag_retain = true; } Logger::getLogger()->debug("%s - flags :%X: flag_retain :%d: sent :%ld:", __FUNCTION__, flags, flag_retain, sent); logger->info("Purge by Rows called"); if (flag_retain) { limit = sent; logger->info("Sent is %lu", sent); } logger->info("Purge by Rows called with flag_retain %d, rows %lu, limit %lu", flag_retain, rows, limit); // Don't save unsent rows char *zErrMsg = NULL; int rc; sqlite3_stmt *stmt; sqlite3_stmt *idStmt; rc = SQLexec(dbHandle, "readings", "select count(rowid) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";", rowidCallback, &rowcount, &zErrMsg); if (rc != SQLITE_OK) { raiseError("purge - phaase 0, fetching row count", zErrMsg); sqlite3_free(zErrMsg); return 0; } rc = SQLexec(dbHandle, "readings", "select max(id) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";", rowidCallback, &maxId, &zErrMsg); if (rc != SQLITE_OK) { raiseError("purge - phaase 0, fetching maximum id", zErrMsg); sqlite3_free(zErrMsg); return 0; } numReadings = rowcount; rowsAffected = 0; deletedRows = 0; bool rowsAvailableToPurge = true; // Create the prepared statements SQLBuffer sqlBuffer; sqlBuffer.append("select min(id) from " READINGS_DB_NAME_BASE "." READINGS_TABLE ";"); const char *idquery = sqlBuffer.coalesce(); rc = sqlite3_prepare_v2(dbHandle, idquery, -1, &idStmt, NULL); if (rc != SQLITE_OK) { raiseError("purgeReadingsByRows", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", idquery); delete[] idquery; return 0; } delete[] idquery; SQLBuffer sql; sql.append("delete from " READINGS_DB_NAME_BASE "." READINGS_TABLE " where id <= ? 
;"); const char *delquery = sql.coalesce(); rc = sqlite3_prepare_v2(dbHandle, delquery, strlen(delquery), &stmt, NULL); if (rc != SQLITE_OK) { raiseError("purgeReadingsByRows", sqlite3_errmsg(dbHandle)); Logger::getLogger()->error("SQL statement: %s", delquery); delete[] delquery; return 0; } delete[] delquery; do { if (rowcount <= rows) { logger->info("Row count %d is less than required rows %d", rowcount, rows); rowsAvailableToPurge = false; break; } if (SQLstep(idStmt) == SQLITE_ROW) { minId = sqlite3_column_int(idStmt, 0); } sqlite3_clear_bindings(idStmt); sqlite3_reset(idStmt); if (rc == SQLITE_ERROR) { raiseError("purge - phaase 0, fetching minimum id", sqlite3_errmsg(dbHandle)); sqlite3_free(zErrMsg); return 0; } deletePoint = minId + m_purgeBlockSize; if (maxId - deletePoint < rows || deletePoint > maxId) deletePoint = maxId - rows; // Do not delete if (flag_retain) { if (limit < deletePoint) { deletePoint = limit; } } { logger->info("RowCount %lu, Max Id %lu, min Id %lu, delete point %lu", rowcount, maxId, minId, deletePoint); } sqlite3_bind_int(stmt, 1,(unsigned long) deletePoint); { // Exec DELETE query: no callback, no resultset rc = SQLstep(stmt); if (rc == SQLITE_DONE) { sqlite3_clear_bindings(stmt); sqlite3_reset(stmt); } rowsAffected = sqlite3_changes(dbHandle); deletedRows += rowsAffected; numReadings -= rowsAffected; rowcount -= rowsAffected; logger->debug("Deleted %lu rows", rowsAffected); if (rowsAffected == 0) { break; } if (limit != 0 && sent != 0) { unsentPurged = deletePoint - sent; } else if (!limit) { unsentPurged += rowsAffected; } } std::this_thread::yield(); // Give other threads a chance to run } while (rowcount > rows); if (rowsAvailableToPurge) { sqlite3_finalize(idStmt); sqlite3_finalize(stmt); } if (limit) { unsentRetained = numReadings - rows; } gettimeofday(&endTv, NULL); unsigned long duration = (1000000 * (endTv.tv_sec - startTv.tv_sec)) + endTv.tv_usec - startTv.tv_usec; ostringstream convert; convert << "{ \"removed\" : " << 
deletedRows << ", "; convert << " \"unsentPurged\" : " << unsentPurged << ", "; convert << " \"unsentRetained\" : " << unsentRetained << ", "; convert << " \"readings\" : " << numReadings << ", "; convert << " \"method\" : \"rows\", "; convert << " \"duration\" : " << duration << " }"; result = convert.str(); Logger::getLogger()->debug("%s - rows :%lu: flag :%x: sent :%lu: numReadings :%lu: rowsAffected :%u: result :%s:", __FUNCTION__, rows, flags, sent, numReadings, rowsAffected, result.c_str() ); logger->info("Purge by Rows complete: %s", result.c_str()); return deletedRows; } /** * Purge readings by asset or purge all readings * * @param asset The asset name to purge * If empty all assets will be removed * @return The number of removed asset records */ unsigned int Connection::purgeReadingsAsset(const string& asset) { SQLBuffer sql; unsigned int rowsAffected = 0; sql.append("DELETE FROM " READINGS_DB_NAME_BASE "." READINGS_TABLE); if (!asset.empty()) { sql.append(" WHERE asset_code = '"); sql.append(asset); sql.append('\''); } sql.append(';'); const char *query = sql.coalesce(); char *zErrMsg = NULL; int rc; logSQL("ReadingsAssetPurge", query); #if 0 if (m_writeAccessOngoing) { while (m_writeAccessOngoing) { std::this_thread::yield(); } } #endif START_TIME; // Exec DELETE query: no callback, no resultset rc = SQLexec(dbHandle, "readings", query, NULL, NULL, &zErrMsg); END_TIME; // Release memory for 'query' var delete[] query; if (rc != SQLITE_OK) { raiseError("ReadingsAssetPurge", zErrMsg); sqlite3_free(zErrMsg); return rowsAffected; } // Get db changes rowsAffected = sqlite3_changes(dbHandle); return rowsAffected; } ================================================ FILE: C/plugins/storage/sqlitelb/include/common.h ================================================ #ifndef _COMMON_CONNECTION_H #define _COMMON_CONNECTION_H #include <sql_buffer.h> #include <iostream> #include <sqlite3.h> #include "rapidjson/document.h" #include "rapidjson/writer.h" #include 
"rapidjson/stringbuffer.h"
#include "rapidjson/error/error.h"
#include "rapidjson/error/en.h"
#include <string>
#include <map>
#include <stdarg.h>
#include <stdlib.h>
#include <sstream>
#include <logger.h>
#include <time.h>
#include <unistd.h>
#include <chrono>
#include <thread>
#include <atomic>
#include <condition_variable>
#include <sys/time.h>
#include <connection.h>

// Purge flag bits, tested via (flags & ...) in the purge entry points
#define STORAGE_PURGE_RETAIN_ANY 0x0001U
#define STORAGE_PURGE_RETAIN_ALL 0x0002U
#define STORAGE_PURGE_SIZE	 0x0004U

// PRAGMA string applied to every new SQLite connection
#define DB_CONFIGURATION "PRAGMA busy_timeout = 5000; PRAGMA cache_size = -4000; PRAGMA journal_mode = WAL; PRAGMA secure_delete = off; PRAGMA journal_size_limit = 4096000;"

// Map of the supported date format strings to the SQLite strftime formats
// (F_* macros are presumably defined in an included header — not visible here)
static std::map<std::string, std::string> sqliteDateFormat = {
						{"HH24:MI:SS", F_TIMEH24_S},
						{"YYYY-MM-DD HH24:MI:SS.MS", F_DATEH24_MS},
						{"YYYY-MM-DD HH24:MI:SS", F_DATEH24_S},
						{"YYYY-MM-DD HH24:MI", F_DATEH24_M},
						{"YYYY-MM-DD HH24", F_DATEH24_H},
						{"", ""}
					};
#endif
================================================
FILE: C/plugins/storage/sqlitelb/include/profile.h
================================================
#ifndef _PROFILE_H
#define _PROFILE_H
/*
 * Fledge storage service.
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <string> #include <vector> #include <sys/time.h> #include <logger.h> #define TIME_BUCKETS 20 #define BUCKET_SIZE 5 class ProfileItem { public: ProfileItem(const std::string& reference) : m_reference(reference) { gettimeofday(&m_tvStart, NULL); auto timenow = chrono::system_clock::to_time_t(chrono::system_clock::now()); m_ts = std::string(ctime(&timenow)); m_ts.back() = '\0'; }; ~ProfileItem() {}; void complete() { struct timeval tv; gettimeofday(&tv, NULL); m_duration = (tv.tv_sec - m_tvStart.tv_sec) * 1000 + (tv.tv_usec - m_tvStart.tv_usec) / 1000; }; unsigned long getDuration() { return m_duration; }; const std::string& getReference() const { return m_reference; }; const std::string& getTs() const { return m_ts; }; private: std::string m_reference; struct timeval m_tvStart; unsigned long m_duration; std::string m_ts; }; class QueryProfile { public: QueryProfile(int samples) : m_samples(samples) { time(&m_lastReport); }; void insert(ProfileItem *item) { int b = item->getDuration() / BUCKET_SIZE; if (b >= TIME_BUCKETS) b = TIME_BUCKETS - 1; m_buckets[b]++; if (m_items.size() == m_samples) { int minIndex = 0; unsigned long minDuration = m_items[0]->getDuration(); for (int i = 1; i < m_items.size(); i++) { if (m_items[i]->getDuration() < minDuration) { minDuration = m_items[i]->getDuration(); minIndex = i; } } if (item->getDuration() > minDuration) { delete m_items[minIndex]; m_items[minIndex] = item; } else { delete item; } } else { m_items.push_back(item); } if (time(0) - m_lastReport > 600) { report(); } }; private: int m_samples; std::vector<ProfileItem *> m_items; time_t m_lastReport; unsigned int m_buckets[TIME_BUCKETS]; void report() { Logger *logger = Logger::getLogger(); logger->info("Storage profile report"); logger->info(" < %3d mS %d", BUCKET_SIZE, m_buckets[0]); for (int j = 1; j < TIME_BUCKETS - 1; j++) { logger->info("%3d-%3d mS %d", j * 
BUCKET_SIZE, (j + 1) * BUCKET_SIZE, m_buckets[j]); } logger->info(" > %3d mS %d", BUCKET_SIZE * TIME_BUCKETS, m_buckets[TIME_BUCKETS-1]); for (int i = 0; i < m_items.size(); i++) { logger->info("%ld mS, %s, %s\n", m_items[i]->getDuration(), m_items[i]->getTs().c_str(), m_items[i]->getReference().c_str()); } time(&m_lastReport); }; }; #endif ================================================ FILE: C/plugins/storage/sqlitelb/plugin.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <sqlite_common.h> #include <connection_manager.h> #include <connection.h> #include <plugin_api.h> #include <stdio.h> #include <stdlib.h> #include <strings.h> #include "sqlite3.h" #include "rapidjson/document.h" #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include <sstream> #include <iostream> #include <string> #include <logger.h> #include <plugin_exception.h> #include <reading_stream.h> #include <config_category.h> using namespace std; using namespace rapidjson; /** * The SQLite3 plugin interface */ extern "C" { const char *default_config = QUOTE({ "poolSize" : { "description" : "Connection pool size", "type" : "integer", "default" : "5", "displayName" : "Pool Size", "order" : "1" }, "vacuumInterval" : { "description" : "The interval between execution of a SQLite vacuum command", "type" : "integer", "minimum" : "1", "default" : "6", "displayName" : "Vacuum Interval", "order" : "2" }, "purgeBlockSize" : { "description" : "The number of rows to purge in each delete statement", "type" : "integer", "default" : "10000", "displayName" : "Purge Block Size", "order" : "3", "minimum" : "1000", "maximum" : "100000" } }); /** * The plugin information structure */ static PLUGIN_INFORMATION info = { "SQLiteLb", // Name "1.2.0", // Version SP_COMMON|SP_READINGS, // Flags PLUGIN_TYPE_STORAGE, // Type "1.6.0", // Interface version 
default_config }; /** * Return the information about this plugin */ PLUGIN_INFORMATION *plugin_info() { return &info; } /** * Initialise the plugin, called to get the plugin handle * In the case of SQLLite we also get a pool of connections * to use. */ PLUGIN_HANDLE plugin_init(ConfigCategory *category) { ConnectionManager *manager = ConnectionManager::getInstance(); int poolSize = 5; if (category->itemExists("poolSize")) { poolSize = strtol(category->getValue("poolSize").c_str(), NULL, 10); } manager->growPool(poolSize); if (category->itemExists("vacuumInterval")) { manager->setVacuumInterval(strtol(category->getValue("vacuumInterval").c_str(), NULL, 10)); } if (category->itemExists("purgeBlockSize")) { unsigned long purgeBlockSize = strtoul(category->getValue("purgeBlockSize").c_str(), NULL, 10); manager->setPurgeBlockSize(purgeBlockSize); } return manager; } /** * Insert into an arbitrary table */ int plugin_common_insert(PLUGIN_HANDLE handle, char *schema, char *table, char *data) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); int result = connection->insert(std::string(schema), std::string(table), std::string(data)); manager->release(connection); return result; } /** * Retrieve data from an arbitrary table */ const char *plugin_common_retrieve(PLUGIN_HANDLE handle, char *schema, char *table, char *query) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; bool rval = connection->retrieve(std::string(schema), std::string(table), std::string(query), results); manager->release(connection); if (rval) { return strdup(results.c_str()); } return NULL; } /** * Update an arbitary table */ int plugin_common_update(PLUGIN_HANDLE handle, char *schema, char *table, char *data) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); int result = connection->update(std::string(schema), 
std::string(table), std::string(data)); manager->release(connection); return result; } /** * Delete from an arbitrary table */ int plugin_common_delete(PLUGIN_HANDLE handle, char *schema, char *table, char *condition) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); int result = connection->deleteRows(std::string(schema), std::string(table), std::string(condition)); manager->release(connection); return result; } /** * Append a sequence of readings to the readings buffer */ int plugin_reading_append(PLUGIN_HANDLE handle, char *readings) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); int result = connection->appendReadings(readings); manager->release(connection); return result;; } /** * Append a stream of readings to the readings buffer */ int plugin_readingStream(PLUGIN_HANDLE handle, ReadingStream **readings, bool commit) { int result = 0; ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); result = connection->readingStream(readings, commit); manager->release(connection); return result;; } /** * Fetch a block of readings from the readings buffer */ char *plugin_reading_fetch(PLUGIN_HANDLE handle, unsigned long id, unsigned int blksize) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string resultSet; connection->fetchReadings(id, blksize, resultSet); manager->release(connection); return strdup(resultSet.c_str()); } /** * Retrieve some readings from the readings buffer */ char *plugin_reading_retrieve(PLUGIN_HANDLE handle, char *condition) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; connection->retrieveReadings(std::string(condition), results); manager->release(connection); return strdup(results.c_str()); } /** * Purge readings from the buffer */ char 
*plugin_reading_purge(PLUGIN_HANDLE handle, unsigned long param, unsigned int flags, unsigned long sent) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; unsigned long age, size; if (flags & STORAGE_PURGE_SIZE) { (void)connection->purgeReadingsByRows(param, flags, sent, results); } else { age = param; (void)connection->purgeReadings(age, flags, sent, results); } manager->release(connection); return strdup(results.c_str()); } /** * Release a previously returned result set */ void plugin_release(PLUGIN_HANDLE handle, char *results) { (void)handle; free(results); } /** * Return details on the last error that occured. */ PLUGIN_ERROR *plugin_last_error(PLUGIN_HANDLE handle) { ConnectionManager *manager = (ConnectionManager *)handle; return manager->getError(); } /** * Shutdown the plugin */ bool plugin_shutdown(PLUGIN_HANDLE handle) { ConnectionManager *manager = (ConnectionManager *)handle; manager->shutdown(); return true; } /** * Create snapshot of a common table * * @param handle The plugin handle * @param table The table to shapshot * @param id The snapshot id * @return -1 on error, >= o on success * * The new created table has the following name: * table_id */ int plugin_create_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); int result = connection->create_table_snapshot(std::string(table), std::string(id)); manager->release(connection); return result; } /** * Load a snapshot of a common table * * @param handle The plugin handle * @param table The table to fill from a given snapshot * @param id The table snapshot id * @return -1 on error, >= o on success */ int plugin_load_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); int result = 
connection->load_table_snapshot(std::string(table), std::string(id)); manager->release(connection); return result; } /** * Delete a snapshot of a common table * * @param handle The plugin handle * @param table The table which shapshot will be removed * @param id The snapshot id * @return -1 on error, >= o on success * */ int plugin_delete_table_snapshot(PLUGIN_HANDLE handle, char *table, char *id) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); int result = connection->delete_table_snapshot(std::string(table), std::string(id)); manager->release(connection); return result; } /** * Get all snapshots of a given common table * * @param handle The plugin handle * @param table The table name * @return List of snapshots (even empty list) or NULL for errors * */ const char* plugin_get_table_snapshots(PLUGIN_HANDLE handle, char *table) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; bool rval = connection->get_table_snapshots(std::string(table), results); manager->release(connection); return rval ? 
strdup(results.c_str()) : NULL;
}

/**
 * Update or creates a schema
 *
 * (fixed typo "creats"; removed the stale "@param schema" entry — the
 * function takes no schema parameter, the name is part of the definition)
 *
 * @param handle	The plugin handle
 * @param definition	The schema definition
 * @return		-1 on error, >= 0 on success
 *
 */
int plugin_createSchema(PLUGIN_HANDLE handle, char *definition)
{
ConnectionManager *manager = (ConnectionManager *)handle;
Connection        *connection = manager->allocate();
int result = connection->createSchema(std::string(definition));
	manager->release(connection);
	return result;
}

/**
 * Purge given readings asset or all readings from the buffer
 */
unsigned int plugin_reading_purge_asset(PLUGIN_HANDLE handle, char *asset)
{
ConnectionManager *manager = (ConnectionManager *)handle;
Connection        *connection = manager->allocate();

	unsigned int deleted = connection->purgeReadingsAsset(asset);
	manager->release(connection);
	return deleted;
}

};
================================================
FILE: C/plugins/storage/sqlitememory/CMakeLists.txt
================================================
cmake_minimum_required(VERSION 2.6.0)
project(sqlitememory)

set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(STORAGE_COMMON_LIB storage-common-lib)

# Path of compiled sqlite3 file: /usr/local/bin
set(FLEDGE_SQLITE3_LIBS "/usr/local/bin" CACHE INTERNAL "")

# Find source files
# Add sqlitelb plugin common files
file(GLOB COMMON_SOURCES ../sqlitelb/common/*.cpp)
# Add sqlitememory files
file(GLOB SOURCES *.cpp)

# Include header files
include_directories(../../../common/include)
include_directories(../../../services/common/include)
include_directories(../common/include)
include_directories(../../../thirdparty/rapidjson/include)
# Add sqlitelb plugin header files
include_directories(../sqlitelb/include)
include_directories(../sqlitelb/common/include)
include_directories(../sqlite/common/include)

link_directories(${PROJECT_BINARY_DIR}/../../../lib)

# Create shared library
add_library(${PROJECT_NAME} SHARED ${SOURCES}
${COMMON_SOURCES})
set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1)
target_link_libraries(${PROJECT_NAME} ${STORAGE_COMMON_LIB})

# Check Sqlite3 required version
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}")
find_package(sqlite3)

add_definitions(-DSQLITE_SPLIT_READINGS=1)
add_definitions(-DPLUGIN_LOG_NAME="SQLite 3 in_memory")
add_definitions(-DMEMORY_READING_PLUGIN=1)

# Use static SQLite3 library
if(EXISTS ${FLEDGE_SQLITE3_LIBS})
	include_directories(${FLEDGE_SQLITE3_LIBS})
	target_link_libraries(${PROJECT_NAME} -L"${FLEDGE_SQLITE3_LIBS}/.libs" -lsqlite3)
else()
	# Link with SQLite3 library
	target_link_libraries(${PROJECT_NAME} -lsqlite3)
endif()

# Install library
install(TARGETS ${PROJECT_NAME} DESTINATION fledge/plugins/storage/${PROJECT_NAME})
================================================
FILE: C/plugins/storage/sqlitememory/Findsqlite3.cmake
================================================
# This CMake file locates the SQLite3 development libraries
#
# The following variables are set:
# SQLITE_FOUND		- If the SQLite library was found
# SQLITE_LIBRARIES	- Path to the static library
# SQLITE_INCLUDE_DIR	- Path to SQLite headers
# SQLITE_VERSION	- Library version

set(SQLITE_MIN_VERSION "3.11.0")
find_path(SQLITE_INCLUDE_DIR sqlite3.h)
find_library(SQLITE_LIBRARIES NAMES libsqlite3.so)

# Check whether path of compiled libsqlite3.a and .h files exists
if (EXISTS ${FLEDGE_SQLITE3_LIBS})
	find_path(SQLITE_INCLUDE_DIR sqlite3.h PATHS ${FLEDGE_SQLITE3_LIBS})
	find_library(SQLITE_LIBRARIES NAMES libsqlite3.a PATHS "${FLEDGE_SQLITE3_LIBS}/.libs")
else()
	# Use system defaults
	find_path(SQLITE_INCLUDE_DIR sqlite3.h)
	find_library(SQLITE_LIBRARIES NAMES libsqlite3.so)
endif()

if (SQLITE_INCLUDE_DIR AND SQLITE_LIBRARIES)
	# Extract the version string from sqlite3.h and compare with the minimum
	execute_process(COMMAND grep ".*#define.*SQLITE_VERSION " ${SQLITE_INCLUDE_DIR}/sqlite3.h COMMAND sed "s/.*\"\\(.*\\)\".*/\\1/" OUTPUT_VARIABLE SQLITE_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
	if ("${SQLITE_VERSION}" VERSION_LESS "${SQLITE_MIN_VERSION}")
		message(FATAL_ERROR "SQLite3 version >= ${SQLITE_MIN_VERSION} required, found version ${SQLITE_VERSION}")
	else()
		message(STATUS "Found SQLite version ${SQLITE_VERSION}: ${SQLITE_LIBRARIES}")
		set(SQLITE_FOUND TRUE)
	endif()
else()
	message(FATAL_ERROR "Could not find SQLite")
endif()
================================================
FILE: C/plugins/storage/sqlitememory/connection.cpp
================================================
/*
 * Fledge storage service.
 *
 * Copyright (c) 2019 OSIsoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <connection.h>
#include <connection_manager.h>
#include <sqlite_common.h>
#include <utils.h>
#include <unistd.h>

/**
 * SQLite3 storage plugin for Fledge
 */

using namespace std;
using namespace rapidjson;

static time_t connectErrorTime = 0;

/**
 * Create a SQLite3 database connection
 */
Connection::Connection()
{
	if (getenv("FLEDGE_TRACE_SQL"))
	{
		m_logSQL = true;
	}
	else
	{
		m_logSQL = false;
	}

	/**
	 * Create IN MEMORY database for "readings" table: set empty file
	 */
	const char *dbHandleConn = "file:?cache=shared";

	// UTC time as default
	const char * createReadings = "CREATE TABLE " READINGS_DB_NAME_BASE " ."
READINGS_TABLE_MEM " (" \ "id INTEGER PRIMARY KEY AUTOINCREMENT," \ "asset_code character varying(50) NOT NULL," \ "reading JSON NOT NULL DEFAULT '{}'," \ "user_ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW' ))," \ "ts DATETIME DEFAULT (STRFTIME('%Y-%m-%d %H:%M:%f+00:00', 'NOW' ))" \ ");"; const char * createReadingsFk = "CREATE INDEX fki_" READINGS_TABLE_MEM "_fk1 ON " READINGS_TABLE_MEM " (asset_code);"; const char * createReadingsIdx1 = "CREATE INDEX ix1_" READINGS_TABLE_MEM " ON " READINGS_TABLE_MEM " (asset_code, user_ts desc);"; const char * createReadingsIdx2 = "CREATE INDEX ix2_" READINGS_TABLE_MEM " ON " READINGS_TABLE_MEM " (user_ts);"; // Allow usage of URI for filename sqlite3_config(SQLITE_CONFIG_URI, 1); if (sqlite3_open(dbHandleConn, &dbHandle) != SQLITE_OK) { const char* dbErrMsg = sqlite3_errmsg(dbHandle); const char* errMsg = "Failed to open the IN_MEMORY SQLite3 database"; Logger::getLogger()->error("%s '%s'", dbErrMsg, dbHandleConn); connectErrorTime = time(0); raiseError("InMemory Connection", "%s '%s'", dbErrMsg, dbHandleConn); sqlite3_close_v2(dbHandle); } else { Logger::getLogger()->info("Connected to IN_MEMORY SQLite3 database: %s", dbHandleConn); int rc; // Exec the statements without getting error messages, for now // ATTACH 'fledge' as in memory shared DB rc = sqlite3_exec(dbHandle, "ATTACH DATABASE 'file::memory:?cache=shared' AS '" READINGS_TABLE_MEM "'", NULL, NULL, NULL); // CREATE TABLE readings rc = sqlite3_exec(dbHandle, createReadings, NULL, NULL, NULL); // FK rc = sqlite3_exec(dbHandle, createReadingsFk, NULL, NULL, NULL); // Idx1 rc = sqlite3_exec(dbHandle, createReadingsIdx1, NULL, NULL, NULL); // Idx2 rc = sqlite3_exec(dbHandle, createReadingsIdx2, NULL, NULL, NULL); } } /** * Add a vacuum funtion, this is not needed for SQLite In Memory, but is here * to satisfy the interface requirement. 
*/ bool Connection::vacuum() { return true; } /** * Load the in memory database from a file backup * * @param filename The name of the file to restore from * @return bool Success or failure of the backup */ bool Connection::loadDatabase(const string& filename) { int rc; sqlite3 *file; sqlite3_backup *backup; string pathname = getDataDir() + "/"; pathname.append(filename); pathname.append(".db"); if (access(pathname.c_str(), R_OK) != 0) { Logger::getLogger()->warn("Persisted database %s does not exist", pathname.c_str()); return false; } if ((rc = sqlite3_open(pathname.c_str(), &file)) == SQLITE_OK) { if (backup = sqlite3_backup_init(dbHandle, READINGS_TABLE_MEM, file, "main")) { (void)sqlite3_backup_step(backup, -1); (void)sqlite3_backup_finish(backup); Logger::getLogger()->info("Reloaded persisted data to in-memory database"); } rc = sqlite3_errcode(dbHandle); (void)sqlite3_close(file); } return rc == SQLITE_OK; } /** * Backup the in memory database to a file * * @param filename The name of the file to backup to * @return bool Success or failure of the backup */ bool Connection::saveDatabase(const string& filename) { int rc; sqlite3 *file; sqlite3_backup *backup; string pathname = getDataDir() + "/"; pathname.append(filename); pathname.append(".db"); unlink(pathname.c_str()); if ((rc = sqlite3_open(pathname.c_str(), &file)) == SQLITE_OK) { if (backup = sqlite3_backup_init(file, "main", dbHandle, READINGS_TABLE_MEM)) { rc = sqlite3_backup_step(backup, -1); (void)sqlite3_backup_finish(backup); Logger::getLogger()->info("Persisted data from in-memory database to %s", pathname.c_str()); } rc = sqlite3_errcode(file); if (rc != SQLITE_OK) { Logger::getLogger()->warn("Persisting in-memory database failed: %s", sqlite3_errmsg(file)); } (void)sqlite3_close(file); } else { Logger::getLogger()->warn("Failed to open database %s to persist in-memory data", pathname.c_str()); } return rc == SQLITE_OK; } ================================================ FILE: 
C/plugins/storage/sqlitememory/include/connection.h ================================================ #ifndef _CONNECTION_H #define _CONNECTION_H /* * Fledge storage service. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <sql_buffer.h> #include <string> #include <rapidjson/document.h> #include <sqlite3.h> WARNING: THIS FILE IS NOT USED #define READINGS_TABLE "readings" #define READINGS_TABLE_MEM READINGS_TABLE "_1" class Connection { public: Connection(); ~Connection(); bool retrieveReadings(const std::string& condition, std::string& resultSet); int appendReadings(const char *readings); bool fetchReadings(unsigned long id, unsigned int blksize, std::string& resultSet); unsigned int purgeReadings(unsigned long age, unsigned int flags, unsigned long sent, std::string& results); long tableSize(const std::string& table); void setTrace(bool flag) { m_logSQL = flag; }; static bool formatDate(char *formatted_date, size_t formatted_date_size, const char *date); unsigned int purgeReadingsAsset(const std::string& asset); bool vacuum(); bool loadDatabase(const std::string& filname); bool saveDatabase(const std::string& filname); void setPurgeBlockSize(unsigned long purgeBlockSize) { m_purgeBlockSize = purgeBlockSize; } private: int SQLexec(sqlite3 *db, const char *sql, int (*callback)(void*,int,char**,char**), void *cbArg, char **errmsg); bool m_logSQL; void raiseError(const char *operation, const char *reason,...); sqlite3 *inMemory; // Handle for :memory: database int mapResultSet(void *res, std::string& resultSet); bool jsonWhereClause(const rapidjson::Value& whereClause, SQLBuffer&, bool convertLocaltime = false); bool jsonModifiers(const rapidjson::Value&, SQLBuffer&); bool jsonAggregates(const rapidjson::Value&, const rapidjson::Value&, SQLBuffer&, SQLBuffer&, bool isTableReading = false); bool returnJson(const rapidjson::Value&, SQLBuffer&, SQLBuffer&); char *trim(char *str); const std::string 
escape(const std::string&); bool applyColumnDateTimeFormat(sqlite3_stmt *pStmt, int i, std::string& newDate); void logSQL(const char *, const char *); unsigned long m_purgeBlockSize; }; #endif ================================================ FILE: C/plugins/storage/sqlitememory/include/connection_manager.h ================================================ #ifndef _CONNECTION_MANAGER_H #define _CONNECTION_MANAGER_H /* * Fledge storage service. * * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <plugin_api.h> #include <list> #include <mutex> class Connection; /** * Singleton class to manage SQLite3 Memory connection pool */ class MemConnectionManager { public: static MemConnectionManager *getInstance(); void growPool(unsigned int); unsigned int shrinkPool(unsigned int); Connection *allocate(); void release(Connection *); void shutdown(); void setError(const char *, const char *, bool); PLUGIN_ERROR *getError() { return &lastError; } void setPersist(bool persist, const std::string& filename = "") { m_persist = persist; m_filename = filename; } bool persist() { return m_persist; }; std::string filename() { return m_filename; }; void setPurgeBlockSize(unsigned long purgeBlockSize); private: MemConnectionManager(); static MemConnectionManager *instance; std::list<Connection *> idle; std::list<Connection *> inUse; std::mutex idleLock; std::mutex inUseLock; std::mutex errorLock; PLUGIN_ERROR lastError; bool m_trace; bool m_persist; std::string m_filename; unsigned long m_purgeBlockSize; }; #endif ================================================ FILE: C/plugins/storage/sqlitememory/plugin.cpp ================================================ /* * Fledge storage service. 
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <sqlite_common.h> #include <connection_manager.h> #include <connection.h> #include <plugin_api.h> #include <stdio.h> #include <stdlib.h> #include <strings.h> #include "sqlite3.h" #include "rapidjson/document.h" #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include <config_category.h> #include <sstream> #include <iostream> #include <string> #include <logger.h> #include <plugin_exception.h> #include <reading_stream.h> using namespace std; using namespace rapidjson; /** * The SQLite3 plugin interface */ extern "C" { const char *default_config = QUOTE({ "poolSize" : { "description" : "The number of connections to create in the intial pool of connections", "type" : "integer", "default" : "5", "displayName" : "Pool Size", "order" : "1" }, "filename" : { "description" : "The name of the file to which the in-memory database should be persisted", "type" : "string", "default" : "inmemory", "displayName" : "Persist File", "order" : "3", "validity": "persist == \"true\"" }, "persist" : { "description" : "Enable the persistence of the in-memory database between executions", "type" : "boolean", "default" : "false", "displayName" : "Persist Data", "order" : "2" }, "purgeBlockSize" : { "description" : "The number of rows to purge in each delete statement", "type" : "integer", "default" : "10000", "displayName" : "Purge Block Size", "order" : "3", "minimum" : "1000", "maximum" : "100000" } }); /** * The plugin information structure */ static PLUGIN_INFORMATION info = { "SQLite3", // Name "1.1.0", // Version SP_READINGS, // Flags PLUGIN_TYPE_STORAGE, // Type "1.6.0", // Interface version default_config }; /** * Return the information about this plugin */ PLUGIN_INFORMATION *plugin_info() { return &info; } /** * Initialise the plugin, called to get the plugin handle * In the case of SQLLite we also get a pool of connections * to use. 
* * @param category The plugin configuration category */ PLUGIN_HANDLE plugin_init(ConfigCategory *category) { ConnectionManager *manager = ConnectionManager::getInstance(); int poolSize = 5; if (category->itemExists("poolSize")) { poolSize = strtol(category->getValue("poolSize").c_str(), NULL, 10); } manager->growPool(poolSize); if (category->itemExists("persist")) { string p = category->getValue("persist"); if (p.compare("true") == 0 && category->itemExists("filename")) { manager->setPersist(true, category->getValue("filename")); } else { manager->setPersist(false); } } else { manager->setPersist(false); } if (manager->persist()) { Connection *connection = manager->allocate(); connection->loadDatabase(manager->filename()); } if (category->itemExists("purgeBlockSize")) { unsigned long purgeBlockSize = strtoul(category->getValue("purgeBlockSize").c_str(), NULL, 10); manager->setPurgeBlockSize(purgeBlockSize); } return manager; } /** * Append a sequence of readings to the readings buffer */ int plugin_reading_append(PLUGIN_HANDLE handle, char *readings) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); int result = connection->appendReadings(readings); manager->release(connection); return result;; } /** * Append a stream of readings to the readings buffer */ int plugin_readingStream(PLUGIN_HANDLE handle, ReadingStream **readings, bool commit) { int result = 0; ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); result = connection->readingStream(readings, commit); manager->release(connection); return result;; } /** * Fetch a block of readings from the readings buffer */ char *plugin_reading_fetch(PLUGIN_HANDLE handle, unsigned long id, unsigned int blksize) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string resultSet; connection->fetchReadings(id, blksize, resultSet); 
manager->release(connection); return strdup(resultSet.c_str()); } /** * Retrieve some readings from the readings buffer */ char *plugin_reading_retrieve(PLUGIN_HANDLE handle, char *condition) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; connection->retrieveReadings(std::string(condition), results); manager->release(connection); return strdup(results.c_str()); } /** * Purge readings from the buffer */ char *plugin_reading_purge(PLUGIN_HANDLE handle, unsigned long param, unsigned int flags, unsigned long sent) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); std::string results; unsigned long age, size; if (flags & STORAGE_PURGE_SIZE) // Purge by size { (void)connection->purgeReadingsByRows(param, flags, sent, results); } else { age = param; (void)connection->purgeReadings(age, flags, sent, results); } manager->release(connection); return strdup(results.c_str()); } /** * Release a previously returned result set */ void plugin_release(PLUGIN_HANDLE handle, char *results) { (void)handle; free(results); } /** * Return details on the last error that occured. 
*/ PLUGIN_ERROR *plugin_last_error(PLUGIN_HANDLE handle) { ConnectionManager *manager = (ConnectionManager *)handle; return manager->getError(); } /** * Shutdown the plugin */ bool plugin_shutdown(PLUGIN_HANDLE handle) { ConnectionManager *manager = (ConnectionManager *)handle; if (manager->persist()) { Connection *connection = manager->allocate(); connection->saveDatabase(manager->filename()); } manager->shutdown(); return true; } /** * Purge given readings asset or all readings from the buffer */ unsigned int plugin_reading_purge_asset(PLUGIN_HANDLE handle, char *asset) { ConnectionManager *manager = (ConnectionManager *)handle; Connection *connection = manager->allocate(); unsigned int deleted = connection->purgeReadingsAsset(asset); manager->release(connection); return deleted; } }; ================================================ FILE: C/plugins/utils/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.4.0) project(get_plugin_info) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") # Include header files include_directories(include ../../services/common/include) # Create get_plugin_info utility add_executable(${PROJECT_NAME} get_plugin_info.cpp) target_link_libraries(${PROJECT_NAME} -ldl) add_executable(cmdutil cmdutil.cpp) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/extras/C) install(TARGETS cmdutil DESTINATION fledge/extras/C) ================================================ FILE: C/plugins/utils/cmdutil.cpp ================================================ /* * Utility to run some commands for fledge as root using setuid * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> extern int errno; /** * Check whether file/dir exists within FLEDGE_ROOT 
* * @param rootdir FLEDGE_ROOT path * @param file relative path of file or dir inside FLEDGE_ROOT */ bool checkFile(char *rootdir, char *file) { char path[256]; snprintf(path, sizeof(path), "%s/%s", rootdir, file); return (access(path, F_OK) == 0); } const char *cmds[] = {"tar-extract", "cp", "rm", "pip3-pkg", "pip3-req", "mkdir"}; typedef enum { TAR_EXTRACT, CP, RM, PIP3_PKG, PIP3_REQ, MKDIR } cmdtype_t; char *argsArray[][6] = { {(char *) "/bin/tar", (char *) "-C", (char *) "PLACEHOLDER", (char *) "-xf", (char *) "PLACEHOLDER", NULL}, {(char *) "/bin/cp", (char *) "-r", (char *) "PLACEHOLDER", (char *) "PLACEHOLDER", NULL, NULL}, {(char *) "/bin/rm", (char *) "-rf", (char *) "PLACEHOLDER", NULL, NULL, NULL}, {(char *) "pip3", (char *) "install", (char *) "PLACEHOLDER", (char *) "--no-cache-dir", NULL, NULL}, {(char *) "pip3", (char *) "install", (char *) "-Ir", (char *) "PLACEHOLDER", (char *) "--no-cache-dir", NULL}, {(char *) "mkdir", (char *) "-p", (char *) "PLACEHOLDER", NULL, NULL, NULL} }; int getCmdType(const char *cmd) { for (int i=0; i<sizeof(cmds)/sizeof(const char *); i++) if (strcmp(cmd, cmds[i])==0) return i; return -1; } /** * Run some shell commands, if setuid bit is set, these cmds are run as root user * * Usage: cmdutil <cmd> <params> * * Example command to execute Way to invoke cmdutil to do so * -------------------------- ------------------------------- * sudo tar -C $FLEDGE_ROOT -xf abc.tar.gz cmdutil tar-extract abc.tar.gz * sudo cp -r abc $FLEDGE_ROOT/xyz cmdutil cp abc xyz * sudo rm -rf $FLEDGE_ROOT/abc cmdutil rm abc * * sudo pip3 install aiocoap==0.3 --no-cache-dir cmdutil pip3-pkg aiocoap==0.3 * sudo pip3 install -Ir requirements.txt --no-cache-dir cmdutil pip3-req requirements.txt * * sudo mkdir -p $FLEDGE_ROOT/abc cmdutil mkdir abc */ int main(int argc, char *argv[]) { if(argc < 2) { printf("Incorrect usage\n"); return 1; } char *rootdir = getenv("FLEDGE_ROOT"); if (!rootdir || rootdir[0]==0) { printf("Unable to find path where archive 
is to be extracted\n"); return 2; } struct stat sb; stat(rootdir, &sb); if ((sb.st_mode & S_IFMT) != S_IFDIR) { printf("Unable to find path where archive is to be extracted\n"); return 2; } if (!checkFile(rootdir, (char *) "bin/fledge") || !checkFile(rootdir, (char *) "services/fledge.services.storage") || !checkFile(rootdir, (char *) "python/fledge/services/core/routes.py") || !checkFile(rootdir, (char *) "lib/libcommon-lib.so") || !checkFile(rootdir, (char *) "tasks/sending_process")) { printf("Unable to find fledge insallation\n"); return 2; } int cmdtype = getCmdType(argv[1]); //printf("cmdtype=%d\n", cmdtype); if(cmdtype == -1) { printf("Unidentified command\n"); return 3; } char *args[6]; for(int i=0; i<6; i++) args[i] = argsArray[cmdtype][i]; char buf[128]; switch (cmdtype) { case TAR_EXTRACT: args[2] = rootdir; args[4] = argv[2]; break; case CP: args[2] = argv[2]; snprintf(buf, sizeof(buf), "%s/%s", rootdir, argv[3]); buf[sizeof(buf)-1] = '\0'; // force null terminate args[3] = buf; break; case RM: snprintf(buf, sizeof(buf), "%s/%s", rootdir, argv[2]); buf[sizeof(buf)-1] = '\0'; // force null terminate args[2] = buf; break; case PIP3_PKG: args[2] = argv[2]; break; case PIP3_REQ: args[3] = argv[2]; break; case MKDIR: snprintf(buf, sizeof(buf), "%s/%s", rootdir, argv[2]); buf[sizeof(buf)-1] = '\0'; // force null terminate args[2] = buf; break; default: printf("Unidentified command\n"); return 3; } //printf("cmd=%s %s %s %s %s %s\n", args[0], args[1], args[2], args[3]?args[3]:"", args[4]?args[4]:"", args[5]?args[5]:""); errno = 0; int rc = execvp(args[0], args); if (rc != 0) { printf("execvp failed: rc=%d, errno %d=%s\n", rc, errno, strerror(errno)); return rc; } return 0; } ================================================ FILE: C/plugins/utils/get_plugin_info.cpp ================================================ /* * Utility to extract plugin_info from north/south C plugin library * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 
Licence * * Author: Amandeep Singh Arora */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <dlfcn.h> #include <syslog.h> #include "plugin_api.h" typedef PLUGIN_INFORMATION *(*func_t)(); /** * Extract value of a given symbol from given plugin library * * Usage: get_plugin_info <plugin library> <function symbol to fetch plugin info from> * * @param argv[1] relative/absolute path to north/south C plugin shared library * * @param argv[2] symbol to extract value from (defaults 'plugin_info') */ int main(int argc, char *argv[]) { void *hndl; char *routine = (char *)"plugin_info"; if (argc == 3) { routine = argv[2]; } else if (argc < 2) { fprintf(stderr, "Insufficient number of args...\n\nUsage: %s <plugin library> [ <function to fetch plugin info> ]\n", argv[0]); exit(1); } openlog("Fledge PluginInfo", LOG_PID|LOG_CONS, LOG_USER); setlogmask(LOG_UPTO(LOG_WARNING)); if (access(argv[1], F_OK|R_OK) != 0) { syslog(LOG_ERR, "Unable to access library file '%s', exiting...\n", argv[1]); exit(2); } if ((hndl = dlopen(argv[1], RTLD_GLOBAL|RTLD_LAZY)) != NULL) { func_t infoEntry = (func_t)dlsym(hndl, routine); if (infoEntry == NULL) { // Unable to find plugin_info entry point syslog(LOG_ERR, "Plugin library %s does not support %s function : %s\n", argv[1], routine, dlerror()); dlclose(hndl); closelog(); exit(3); } PLUGIN_INFORMATION *info = (PLUGIN_INFORMATION *)(*infoEntry)(); printf("{\"name\": \"%s\", \"version\": \"%s\", \"type\": \"%s\", \"interface\": \"%s\", \"flag\": %d, \"config\": %s}\n", info->name, info->version, info->type, info->interface, info->options, info->config); } else { syslog(LOG_ERR, "dlopen failed: %s\n", dlerror()); } closelog(); return 0; } ================================================ FILE: C/services/common/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.4.0) if( POLICY CMP0007 ) cmake_policy( SET CMP0007 NEW ) endif() project(services-common-lib) set(CMAKE_CXX_FLAGS_DEBUG "-O0 
-ggdb")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -O0")
set(DLLIB -ldl)

# Find source files
file(GLOB SOURCES *.cpp)

# Find python3.x dev/lib package
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    pkg_check_modules(PYTHON REQUIRED python3)
else()
    find_package(Python3 REQUIRED COMPONENTS Interpreter Development)
endif()

# Include header files
include_directories(include ../../common/include ../../thirdparty/Simple-Web-Server ../../thirdparty/rapidjson/include)

# Add Python 3.x header files
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    include_directories(${PYTHON_INCLUDE_DIRS})
else()
    include_directories(${Python3_INCLUDE_DIRS})
endif()

set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../lib)

# Create shared library
add_library(${PROJECT_NAME} SHARED ${SOURCES})
target_link_libraries(${PROJECT_NAME} ${DLLIB})
set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1)

# Install library
install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib)
================================================ FILE: C/services/common/README.rst ================================================
Common C Code
=============

This directory contains the C/C++ code that is common to one or more microservices or executables written in C or C++ within Fledge.
================================================ FILE: C/services/common/config_handler.cpp ================================================
/*
 * Fledge config manager.
* * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <config_handler.h> using namespace std; ConfigHandler *ConfigHandler::instance = 0; /** * ConfigHandler Singleton implementation */ ConfigHandler * ConfigHandler::getInstance(ManagementClient *mgtClient) { if (!instance) instance = new ConfigHandler(mgtClient); return instance; } /** * Config Handler Constructor */ ConfigHandler::ConfigHandler(ManagementClient *mgtClient) : m_mgtClient(mgtClient) { m_logger = Logger::getLogger(); } /** * Handle a callback from the core to propagate a configuration category * change and propagate that to all the local ServiceHandlers that have * registered for it. * * @param category The name of the category that has changed * @param config The configuration category itself */ void ConfigHandler::configChange(const string& category, const string& config) { m_logger->info("Configuration change notification for %s", category.c_str()); std::unique_lock<std::mutex> lck(m_mutex); pair<CONFIG_MAP::iterator, CONFIG_MAP::iterator> res = m_registrations.equal_range(category); for (CONFIG_MAP::iterator it = res.first; it != res.second; it++) { // The config change call could effect the registered handlers // we therefore need to guard against the map changing m_change = false; lck.unlock(); it->second->configChange(category, config); lck.lock(); if (m_change) // Something changed { return; // Call any other subscribers to this category. In reality there are no others } } } /** * Handle a callback from the core to handle the creation of a child category. 
* * @param parent_category The parent category of the child * @param child_category The name of the category that has created * @param config Configuration of the child category */ void ConfigHandler::configChildCreate(const std::string& parent_category, const string& child_category, const string& config) { std::unique_lock<std::mutex> lck(m_mutex); m_logger->info("Configuration change notification, child category created %s", child_category.c_str()); pair<CONFIG_MAP::iterator, CONFIG_MAP::iterator> res = m_registrationsChild.equal_range(parent_category); for (CONFIG_MAP::iterator it = res.first; it != res.second; it++) { // The config change call could effect the registered handlers // we therefore need to guard against the map changing m_change = false; lck.unlock(); it->second->configChildCreate(parent_category, child_category, config); lck.lock(); if (m_change) // Something changed { return; // Call any other subscribers to this category. In reality there are no others } } } /** * Handle a callback from the core to handle the deletion of a child category. * * @param parent_category The parent category of the child * @param child_category The name of the category that has created */ void ConfigHandler::configChildDelete(const std::string& parent_category, const string& child_category) { std::unique_lock<std::mutex> lck(m_mutex); m_logger->info("Configuration change notification, child category deleted %s", child_category.c_str()); pair<CONFIG_MAP::iterator, CONFIG_MAP::iterator> res = m_registrationsChild.equal_range(parent_category); for (CONFIG_MAP::iterator it = res.first; it != res.second; it++) { // The config change call could effect the registered handlers // we therefore need to guard against the map changing m_change = false; lck.unlock(); it->second->configChildDelete(parent_category, child_category); lck.lock(); if (m_change) // Something changed { return; // Call any other subscribers to this category. 
In reality there are no others } } } /** * Register a service handler for a given configuration category * * @param handler The service handler to call * @param category The configuration category to register */ void ConfigHandler::registerCategory(ServiceHandler *handler, const string& category) { if (m_registrations.count(category) == 0) { int retryCount = 0; while (m_mgtClient->registerCategory(category) == false && retryCount++ < 10) { sleep(2 * retryCount); } if (retryCount >= 10) { m_logger->error("Failed to register configuration category %s", category.c_str()); } else { m_logger->debug("Interest in %s registered", category.c_str()); } } else { m_logger->info("Interest in %s already registered", category.c_str()); } std::unique_lock<std::mutex> lck(m_mutex); m_registrations.insert(pair<string, ServiceHandler *>(category, handler)); m_change = true; } /** * Register a service handler for a given configuration category when a children category is changed * * @param handler The service handler to call * @param category The configuration category to register */ void ConfigHandler::registerCategoryChild(ServiceHandler *handler, const string& category) { if (m_registrationsChild.count(category) == 0) { int retryCount = 0; while (m_mgtClient->registerCategoryChild(category) == false && retryCount++ < 10) { sleep(2 * retryCount); } if (retryCount >= 10) { m_logger->error("Failed to register configuration category %s", category.c_str()); } else { m_logger->debug("Interest in children categories of %s registered", category.c_str()); } } else { m_logger->info("Interest in children categories of %s already registered", category.c_str()); } std::unique_lock<std::mutex> lck(m_mutex); m_registrationsChild.insert(pair<string, ServiceHandler *>(category, handler)); m_change = true; } /** * Unregister a configuration category from the ConfigHandler for * a particular registered ServiceHandler class * * @param handler The configuration handler we would call * @param category 
The category to remove. */ void ConfigHandler::unregisterCategory(ServiceHandler *handler, const string& category) { std::unique_lock<std::mutex> lck(m_mutex); pair<CONFIG_MAP::iterator, CONFIG_MAP::iterator> res = m_registrations.equal_range(category); for (CONFIG_MAP::iterator it = res.first; it != res.second; it++) { if (it->second == handler) { m_registrations.erase(it); break; } } // No remaining registration for this category if (m_registrations.count(category) == 0) { m_mgtClient->unregisterCategory(category); } m_change = true; } ================================================ FILE: C/services/common/filter_python_plugin_handle.cpp ================================================ /* * Fledge Filter Python plugin handle related * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <config_category.h> #include <reading.h> #include <logger.h> #include <filter_python_plugin_handle.h> #define PYTHON_PLUGIN_INTF_LIB "libfilter-plugin-python-interface.so" #define PRINT_FUNC Logger::getLogger()->info("%s:%d", __FUNCTION__, __LINE__); typedef PLUGIN_INFORMATION *(*pluginInitFn)(const char *pluginName, const char *path); using namespace std; /** * Constructor for FilterPythonPluginHandle * - Load python interface library and initialize the interface */ FilterPythonPluginHandle::FilterPythonPluginHandle(const char *pluginName, const char *pluginPathName) : PythonPluginHandle(pluginName, pluginPathName) { // expecting this lib to be present in LD_LIBRARY_PATH: //same dir as where lib-services-common.so is present m_interfaceObjName = PYTHON_PLUGIN_INTF_LIB; // Open interface library object m_hndl = dlopen(m_interfaceObjName.c_str(), RTLD_NOW | RTLD_GLOBAL); if (!m_hndl) { Logger::getLogger()->error("FilterPythonPluginHandle c'tor: dlopen failed for library '%s' : %s", m_interfaceObjName.c_str(), dlerror()); return; } pluginInitFn initFn = (pluginInitFn) dlsym(m_hndl, "PluginInterfaceInit"); if 
(initFn == NULL) { // Unable to find PluginInterfaceInit entry point Logger::getLogger()->error("Plugin library %s does not support %s function : %s", m_interfaceObjName.c_str(), "PluginInterfaceInit", dlerror()); dlclose(m_hndl); m_hndl = NULL; return; } // Initialise Python plugin object void *ref = initFn(pluginName, pluginPathName); if (ref == NULL) { fprintf(stderr, "Plugin library %s : PluginInterfaceInit returned failure", m_interfaceObjName.c_str()); dlclose(m_hndl); m_hndl = NULL; return; } // Set type m_type = PLUGIN_TYPE_FILTER; } ================================================ FILE: C/services/common/include/binary_plugin_handle.h ================================================ #ifndef _BINARY_PLUGIN_HANDLE_H #define _BINARY_PLUGIN_HANDLE_H /* * Fledge plugin handle related * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <logger.h> #include <dlfcn.h> #include <plugin_handle.h> #include <plugin_manager.h> /** * The BinaryPluginHandle class is used to represent an interface to * a plugin that is available in a binary format */ class BinaryPluginHandle : public PluginHandle { public: // for the Storage plugin BinaryPluginHandle(const char *name, const char *path, tPluginType type) { dlerror(); // Clear the existing error handle = dlopen(path, RTLD_LAZY); if (!handle) { Logger::getLogger()->error("Unable to load storage plugin %s, %s", name, dlerror()); } Logger::getLogger()->debug("%s - storage plugin / RTLD_LAZY - name :%s: path :%s:", __FUNCTION__, name, path); } // for all the others plugins BinaryPluginHandle(const char *name, const char *path) { dlerror(); // Clear the existing error handle = dlopen(path, RTLD_LAZY|RTLD_GLOBAL); if (!handle) { Logger::getLogger()->error("Unable to load plugin %s, %s", name, dlerror()); } Logger::getLogger()->debug("%s - other plugin / RTLD_LAZY|RTLD_GLOBAL - name :%s: path :%s:", __FUNCTION__, name, path); } ~BinaryPluginHandle() { if 
(handle) dlclose(handle); } void *GetInfo() { return dlsym(handle, "plugin_info"); } void *ResolveSymbol(const char* sym) { return dlsym(handle, sym); } void *getHandle() { return handle; } private: PLUGIN_HANDLE handle; // pointer returned by dlopen on plugin shared lib }; #endif ================================================ FILE: C/services/common/include/config_handler.h ================================================ #ifndef _CONFIG_HANDLER_H #define _CONFIG_HANDLER_H /* * Fledge * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <service_handler.h> #include <management_client.h> #include <config_category.h> #include <logger.h> #include <string> #include <map> #include <mutex> typedef std::multimap<std::string, ServiceHandler *> CONFIG_MAP; /** * Handler class within a service to manage configuration changes */ class ConfigHandler { public: static ConfigHandler *getInstance(ManagementClient *); void configChange(const std::string& category, const std::string& config); void configChildCreate(const std::string& parent_category, const std::string& child_category, const std::string& config); void configChildDelete(const std::string& parent_category, const std::string& child_category); void registerCategory(ServiceHandler *handler, const std::string& category); void registerCategoryChild(ServiceHandler *handler, const std::string& category); void unregisterCategory(ServiceHandler *handler, const std::string& category); static ConfigHandler *instance; private: ConfigHandler(ManagementClient *); ~ConfigHandler(); ManagementClient *m_mgtClient; CONFIG_MAP m_registrations; CONFIG_MAP m_registrationsChild; Logger *m_logger; std::mutex m_mutex; bool m_change; }; #endif ================================================ FILE: C/services/common/include/filter_python_plugin_handle.h ================================================ #ifndef _FILTER_PYTHON_PLUGIN_HANDLE_H #define 
_FILTER_PYTHON_PLUGIN_HANDLE_H /* * Fledge Notification Python plugin handle related * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <logger.h> #include <vector> #include <sstream> #include <dlfcn.h> #include <python_plugin_handle.h> /** * The PythonPluginHandle class is used to represent an interface to * a plugin that is available as a python script */ class FilterPythonPluginHandle : public PythonPluginHandle { public: FilterPythonPluginHandle(const char *name, const char *path); }; #endif ================================================ FILE: C/services/common/include/management_api.h ================================================ #ifndef _MANAGEMENT_API_H #define _MANAGEMENT_API_H /* * Fledge storage service. * * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <json_provider.h> #include <service_handler.h> #include <server_http.hpp> #include <logger.h> #include <string> #include <time.h> #include <thread> #define PING "/fledge/service/ping" #define SERVICE_SHUTDOWN "/fledge/service/shutdown" #define CONFIG_CHANGE "/fledge/change" #define CONFIG_CHILD_CREATE "/fledge/child_create" #define CONFIG_CHILD_DELETE "/fledge/child_delete" #define SECURITY_CHANGE "^/fledge/security$" using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; /** * Management API server for a C++ microservice */ class ManagementApi { public: ManagementApi(const std::string& name, const unsigned short port); ~ManagementApi(); static ManagementApi *getInstance(); void start(); void startServer(); void stop(); void stopServer(); void registerStats(JSONProvider *statsProvider); void registerProvider(JSONProvider *provider); void registerService(ServiceHandler *serviceHandler) { m_serviceHandler = serviceHandler; } unsigned short getListenerPort() { return m_server->getLocalPort(); } void ping(std::shared_ptr<HttpServer::Response> response, 
std::shared_ptr<HttpServer::Request> request); void shutdown(std::shared_ptr<HttpServer::Response> response, std::shared_ptr<HttpServer::Request> request); void configChange(std::shared_ptr<HttpServer::Response> response, std::shared_ptr<HttpServer::Request> request); void configChildCreate(std::shared_ptr<HttpServer::Response> response, std::shared_ptr<HttpServer::Request> request); void configChildDelete(std::shared_ptr<HttpServer::Response> response, std::shared_ptr<HttpServer::Request> request); void securityChange(std::shared_ptr<HttpServer::Response> response, std::shared_ptr<HttpServer::Request> request); protected: static ManagementApi *m_instance; std::string m_name; Logger *m_logger; time_t m_startTime; HttpServer *m_server; JSONProvider *m_statsProvider; ServiceHandler *m_serviceHandler; std::thread *m_thread; private: void respond(std::shared_ptr<HttpServer::Response>, const std::string&); std::vector<JSONProvider *> m_providers; }; #endif ================================================ FILE: C/services/common/include/north_python_plugin_handle.h ================================================ #ifndef _NORTH_PYTHON_PLUGIN_HANDLE_H #define _NORTH_PYTHON_PLUGIN_HANDLE_H /* * Fledge plugin handle related * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <logger.h> #include <vector> #include <sstream> #include <dlfcn.h> #include <python_plugin_handle.h> /** * The NorthPythonPluginHandle class is used to represent an interface to * a South plugin that is available as a python script */ class NorthPythonPluginHandle : public PythonPluginHandle { public: NorthPythonPluginHandle(const char *name, const char *path); }; #endif ================================================ FILE: C/services/common/include/notification_python_plugin_handle.h ================================================ #ifndef _NOTIFICATION_PYTHON_PLUGIN_HANDLE_H #define _NOTIFICATION_PYTHON_PLUGIN_HANDLE_H /* * 
Fledge Notification Python plugin handle related * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <logger.h> #include <vector> #include <sstream> #include <dlfcn.h> #include <python_plugin_handle.h> /** * The PythonPluginHandle class is used to represent an interface to * a plugin that is available as a python script */ class NotificationPythonPluginHandle : public PythonPluginHandle { public: NotificationPythonPluginHandle(const char *name, const char *path); }; #endif ================================================ FILE: C/services/common/include/perfmonitors.h ================================================ #ifndef _PERFMONITOR_H #define _PERFMONITOR_H /* * Fledge performance monitor * * Copyright (c) 2023 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <thread> #include <storage_client.h> #include <unordered_map> #include <insert.h> #include <mutex> #include <condition_variable> class PerfMon { public: PerfMon(const std::string& name); void addValue(long value); int getValues(InsertValues& values); private: std::string m_name; long m_average; long m_min; long m_max; int m_samples; std::mutex m_mutex; }; /** * Class to handle the performance monitors */ class PerformanceMonitor { public: PerformanceMonitor(const std::string& service, StorageClient *storage); // Write data to storage virtual void writeData(const std::string& table, const InsertValues& values) { // Write data via storage client if (m_storage != NULL) { m_storage->insertTable(table, values); } else { Logger::getLogger()->error("Failed to save performace monitor data: "\ "storage client is null for servide '%s'", m_service.c_str()); } }; virtual ~PerformanceMonitor(); /** * Collect a performance monitor * * @param name Name of the monitor * @param calue Value of the monitor */ inline void collect(const std::string& name, long value) { if (m_collecting) { 
doCollection(name, value);
			}
		};
		// Enable or disable collection of the performance monitors
		void		setCollecting(bool state);
		// Entry point of the background thread that writes monitors to storage
		void		writeThread();
		// Return true while collection is enabled
		bool		isCollecting()
		{
			return m_collecting;
		};
	private:
		void		doCollection(const std::string& name, long value);
	private:
		std::string	m_service;	// Name of the service being monitored
		StorageClient	*m_storage;	// Storage client used to persist monitors
		std::thread	*m_thread;	// Background write thread
		bool		m_collecting;	// True while collection is enabled
		std::unordered_map<std::string, PerfMon *>
				m_monitors;	// The monitors, keyed by name
		std::condition_variable	m_cv;	// Wakes the write thread on shutdown
		std::mutex	m_mutex;	// Used with m_cv by the write thread
};
#endif

================================================
FILE: C/services/common/include/plugin.h
================================================
#ifndef _PLUGIN_H
#define _PLUGIN_H
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <plugin_api.h>

class PluginManager;

/**
 * A generic representation of a plugin
 */
class Plugin {
	public:
		Plugin(PLUGIN_HANDLE handle);
		~Plugin();
		// Return the information structure of the plugin
		const PLUGIN_INFORMATION *getInfo();
		// Return the opaque handle of the loaded plugin
		PLUGIN_HANDLE getHandle()
		{
			return handle;
		}
	protected:
		PLUGIN_HANDLE		handle;		// Opaque handle of the loaded plugin
		PluginManager		*manager;	// The singleton plugin manager
		PLUGIN_INFORMATION	*info;		// Cached plugin information structure
};
#endif

================================================
FILE: C/services/common/include/plugin_api.h
================================================
#ifndef _PLUGIN_API
#define _PLUGIN_API
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017,2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <string>

// Stringification helpers used to turn macro arguments into string literals
#define TO_STRING(...) DEFER(TO_STRING_)(__VA_ARGS__)
#define DEFER(x) x
#define TO_STRING_(...) #__VA_ARGS__
#define QUOTE(...) TO_STRING(__VA_ARGS__)

/**
 * The plugin information structure, used to return information
 * from a plugin during the load and configuration stage.
 */
typedef struct {
	/** The name of the plugin */
	const char	*name;
	/** The release version of the plugin */
	const char	*version;
	/** The set of option flags that apply to this plugin */
	unsigned int	options;
	/**
	 * The plugin type, this is one of storage, south,
	 * filter, north, notificationRule or notificationDelivery
	 */
	const char	*type;
	/** The interface version of this plugin */
	const char	*interface;
	/** The default JSON configuration category for this plugin */
	const char	*config;
} PLUGIN_INFORMATION;

/**
 * Structure used by plugins to return error information
 */
typedef struct {
	char	*message;	/** Human readable error message */
	char	*entryPoint;	/** The plugin entry point that raised the error */
	bool	retryable;	/** True if the failed operation may be retried */
} PLUGIN_ERROR;

/**
 * Pass a name/value pair to a plugin
 */
typedef struct plugin_parameter {
	std::string	name;	/** The name of the parameter */
	std::string	value;	/** The value of the parameter */
} PLUGIN_PARAMETER;

/**
 * The handle used to reference a plugin. This is an opaque data
 * pointer and is used by the plugins as a way to pass information
 * between each invocation of the plugin entry points.
 */
typedef void * PLUGIN_HANDLE;

/**
 * The destinations to which control messages may be sent
 */
typedef enum controlDestination {
	/** The control message is destined for the source of a particular asset */
	DestinationAsset,
	/** The control message is destined for the named service */
	DestinationService,
	/** The control message is destined for all south services that support control */
	DestinationBroadcast,
	/** The control message is destined to execute the named script */
	DestinationScript
} ControlDestination;

/**
 * Plugin options bitmask values
 */
#define SP_COMMON		0x0001
#define SP_READINGS		0x0002
/** The plugin ingests data asynchronously */
#define SP_ASYNC		0x0004
/** The plugin wishes to persist data between executions */
#define SP_PERSIST_DATA		0x0008
/** The notification delivery plugin wishes to add (ingest) new data into the system */
#define SP_INGEST		0x0010
/** The plugin requires access to the Microservice Management API */
#define SP_GET_MANAGEMENT	0x0020
/** The plugin requires direct
 access to the storage service */
#define SP_GET_STORAGE		0x0040
/** The plugin has been deprecated and will be removed in a future release */
#define SP_DEPRECATED		0x0080
/** The plugin is built in and not installed by a separate package */
#define SP_BUILTIN		0x0100
/** The plugin supports control data */
#define SP_CONTROL		0x1000

/**
 * Plugin types
 */
#define PLUGIN_TYPE_STORAGE			"storage"
#define PLUGIN_TYPE_SOUTH			"south"
#define PLUGIN_TYPE_NORTH			"north"
#define PLUGIN_TYPE_FILTER			"filter"
#define PLUGIN_TYPE_NOTIFICATION_RULE		"notificationRule"
#define PLUGIN_TYPE_NOTIFICATION_DELIVERY	"notificationDelivery"
#endif

================================================
FILE: C/services/common/include/plugin_exception.h
================================================
/*
 * Fledge services common.
 *
 * Copyright (c) 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
// NOTE(review): this header has no include guard and relies on <exception>
// and <string> having been included by its includers -- confirm before reuse.
/**
 * Implementation of PluginNotImplementedException.
 * This exception should be thrown when a feature is not implemented yet.
 */
class PluginNotImplementedException : public std::exception {
	public:
		// Construct with a default error message:
		PluginNotImplementedException(const char * error = "Functionality not implemented yet!")
		{
			errorMessage = error;
		}

		// Compatibility with std::exception.
		const char * what() const noexcept
		{
			return errorMessage.c_str();
		}

	private:
		std::string	errorMessage;	// The message returned by what()
};

================================================
FILE: C/services/common/include/plugin_handle.h
================================================
#ifndef _PLUGIN_HANDLE_H
#define _PLUGIN_HANDLE_H
/*
 * Fledge plugin handle related
 *
 * Copyright (c) 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Amandeep Singh Arora
 */
#include <logger.h>
#include <vector>
#include <sstream>
#include <unordered_map>
#include <dlfcn.h>
#include <plugin_api.h>

/**
 * The PluginHandle class is used to represent an opaque handle to a
 * plugin instance
 */
class PluginHandle
{
	public:
		PluginHandle() {}
		virtual ~PluginHandle() {}
		// Return a pointer usable to call the plugin_info entry point
		virtual void *GetInfo() = 0;
		// Resolve the named symbol within the plugin
		virtual void *ResolveSymbol(const char* sym) = 0;
		// Return the underlying handle of the loaded plugin
		virtual void *getHandle() = 0;
};
#endif

================================================
FILE: C/services/common/include/plugin_manager.h
================================================
#ifndef PLUGIN_MANAGER_H
#define PLUGIN_MANAGER_H
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017, 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <plugin_api.h>
#include <plugin_handle.h>
#include <logger.h>
#include <string>
#include <list>
#include <map>
#include <vector>

// Classification of the service that is loading plugins
typedef enum PluginType { PLUGIN_TYPE_ID_STORAGE, PLUGIN_TYPE_ID_OTHER } tPluginType;

// The implementation technology of a loaded plugin
enum PLUGIN_TYPE { BINARY_PLUGIN, PYTHON_PLUGIN, JSON_PLUGIN };

/**
 * The manager for plugins.
 *
 * This manager is a singleton and is responsible for loading, tracking and unloading
 * the plugins within the system.
*/ class PluginManager { public: static PluginManager *getInstance(); PLUGIN_HANDLE loadPlugin(const std::string& name, const std::string& type); void unloadPlugin(PLUGIN_HANDLE handle); void* resolveSymbol(PLUGIN_HANDLE handle, const std::string& symbol); PLUGIN_HANDLE findPluginByName(const std::string& name); PLUGIN_HANDLE findPluginByType(const std::string& type); PLUGIN_INFORMATION *getInfo(const PLUGIN_HANDLE); void getInstalledPlugins(const std::string& type, std::list<std::string>& plugins); void setPluginType(tPluginType type); PLUGIN_TYPE getPluginImplType(const PLUGIN_HANDLE hndl) { return pluginImplTypes[hndl]; } std::vector<std::string> getPluginsByFlags(const std::string& type, unsigned int flags); public: static PluginManager* instance; private: PluginManager(); std::string findPlugin(std::string name, std::string _type, std::string _plugin_path, PLUGIN_TYPE type); private: std::list<PLUGIN_HANDLE> plugins; std::map<std::string, PLUGIN_HANDLE> pluginNames; std::map<std::string, std::string> pluginTypes; std::map<PLUGIN_HANDLE, PLUGIN_TYPE> pluginImplTypes; std::map<PLUGIN_HANDLE, PLUGIN_INFORMATION *> pluginInfo; std::map<PLUGIN_HANDLE, PluginHandle*> pluginHandleMap; Logger* logger; tPluginType m_pluginType; }; #endif ================================================ FILE: C/services/common/include/python_plugin_handle.h ================================================ #ifndef _PYTHON_PLUGIN_HANDLE_H #define _PYTHON_PLUGIN_HANDLE_H /* * Fledge Base plugin handle class * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora, Massimiliano Pinto */ #include <logger.h> #include <vector> #include <sstream> #include <dlfcn.h> #include <plugin_handle.h> #include <Python.h> typedef void* (*pluginResolveSymbolFn)(const char *, const std::string&); typedef void (*pluginCleanupFn)(const std::string&); /** * The PythonPluginHandle class is the base class used to represent an interface to * a plugin that 
is available as a python script */ class PythonPluginHandle : public PluginHandle { public: // Base constructor PythonPluginHandle(const char *name, const char *path) : m_name(name) {}; /** * Base destructor * - Call cleanup on python plugin interface * - Close python plugin interface library handle */ ~PythonPluginHandle() { if (!m_hndl) { return; } pluginCleanupFn cleanupFn = (pluginCleanupFn) dlsym(m_hndl, "PluginInterfaceCleanup"); if (cleanupFn == NULL) { // Unable to find PluginInterfaceCleanup entry point Logger::getLogger()->error("Plugin library %s does not support %s function : %s", m_interfaceObjName.c_str(), "PluginInterfaceCleanup", dlerror()); } else { cleanupFn(m_name); } dlclose(m_hndl); m_hndl = NULL; }; /** * Gets function pointer from loaded python interface library that can * be invoked to call 'sym' function in python plugin * * @param sym The symbol to fetch */ void *ResolveSymbol(const char* sym) { if (!m_hndl) { return NULL; } pluginResolveSymbolFn resolvSymFn = (pluginResolveSymbolFn) dlsym(m_hndl, "PluginInterfaceResolveSymbol"); if (resolvSymFn == NULL) { // Unable to find PluginInterfaceResolveSymbol entry point Logger::getLogger()->error("Plugin library %s does not support " "%s function : %s", m_interfaceObjName.c_str(), "PluginInterfaceResolveSymbol", dlerror()); return NULL; } void *rv = resolvSymFn(sym, m_name); if (!rv) { // Python filter plugins do not support plugin_start // just log a debug message if (m_type.compare(PLUGIN_TYPE_FILTER) == 0) { Logger::getLogger()->debug("PythonPluginHandle::ResolveSymbol " "returning NULL for sym=%s, plugin %s, type %s", sym, m_name.c_str(), m_type.c_str()); } else { Logger::getLogger()->error("PythonPluginHandle::ResolveSymbol " "returning NULL for sym=%s, plugin %s, type %s", sym, m_name.c_str(), m_type.c_str()); } } return rv; }; /** * Returns function pointer that can be invoked to call 'plugin_info' * function in python plugin */ void *GetInfo() { return (void *) 
ResolveSymbol("plugin_info");
		};

		// Return pointer to this class
		void *getHandle() { return this; }

	public:
		// The python plugin interface library shared object
		void*		m_hndl;
		// The interface library name to load
		std::string	m_interfaceObjName;
		// Set plugin name
		std::string	m_name;
		// plugin type
		std::string	m_type;
};
#endif

================================================
FILE: C/services/common/include/service_handler.h
================================================
#ifndef _SERVICE_HANDLER_H
#define _SERVICE_HANDLER_H
/*
 * Fledge service class
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <config_category.h>
#include <string>
#include <management_client.h>

/**
 * ServiceHandler abstract class - the interface that services using the
 * management API must provide.
 */
class ServiceHandler
{
	public:
		virtual void	shutdown() = 0;
		virtual void	restart() = 0;
		virtual void	configChange(const std::string& category,
					const std::string& config) = 0;
		virtual void	configChildCreate(const std::string& parent_category,
					const std::string& category,
					const std::string& config) = 0;
		virtual void	configChildDelete(const std::string& parent_category,
					const std::string& category) = 0;
		virtual bool	isRunning() = 0;
		// Default implementation: accept only an empty payload
		virtual bool	securityChange(const std::string &payload)
		{
			return payload.empty();
		};
};

/**
 * ServiceAuthHandler adds security to the base class ServiceHandler
 */
class ServiceAuthHandler : public ServiceHandler
{
	public:
		ServiceAuthHandler() : m_refreshThread(NULL), m_refreshRunning(true) {};
		virtual ~ServiceAuthHandler()
		{
			// Stop and reap the bearer token refresh thread
			if (m_refreshThread)
			{
				m_refreshRunning = false;
				m_refreshThread->join();
				delete m_refreshThread;
			}
		};
		std::string&	getName() { return m_name; };
		std::string&	getType() { return m_type; };
		bool		createSecurityCategories(ManagementClient* mgtClient, bool dryRun);
		bool		updateSecurityCategory(const std::string& newCategory);
		void		setInitialAuthenticatedCaller();
		void		setAuthenticatedCaller(bool enabled);
		bool		getAuthenticatedCaller();
		// ACL verification (for Dispatcher)
		bool		AuthenticationMiddlewareACL(std::shared_ptr<HttpServer::Response> response,
					std::shared_ptr<HttpServer::Request> request,
					const std::string& serviceName,
					const std::string& serviceType);
		// Handler for Dispatcher
		bool		AuthenticationMiddlewareCommon(std::shared_ptr<HttpServer::Response> response,
					std::shared_ptr<HttpServer::Request> request,
					std::string& callerName,
					std::string& callerType);
		// Handler for South services: token verification and service ACL check
		void		AuthenticationMiddlewarePUT(std::shared_ptr<HttpServer::Response> response,
					std::shared_ptr<HttpServer::Request> request,
					std::function<void(
						std::shared_ptr<HttpServer::Response>,
						std::shared_ptr<HttpServer::Request>)> funcPUT);
		void		refreshBearerToken();
		// Send a good HTTP response to the caller
		void		respond(std::shared_ptr<HttpServer::Response> response,
					const std::string& payload)
		{
			*response << "HTTP/1.1 200 OK\r\n"
				<< "Content-Length: " << payload.length() << "\r\n"
				<< "Content-type: application/json\r\n\r\n"
				<< payload;
		};
		// Send an error message HTTP response to the caller with given HTTP code
		void		respond(std::shared_ptr<HttpServer::Response> response,
					SimpleWeb::StatusCode code,
					const std::string& payload)
		{
			*response << "HTTP/1.1 " << status_code(code) << "\r\n"
				<< "Content-Length: " << payload.length() << "\r\n"
				<< "Content-type: application/json\r\n\r\n"
				<< payload;
		};
		static ManagementClient *
				getMgmtClient() { return m_mgtClient; };
		bool		securityChange(const std::string &payload);

	private:
		bool		verifyURL(const std::string& path,
					const std::string& sName,
					const std::string& sType);
		bool		verifyService(const std::string& sName,
					const std::string &sType);

	protected:
		std::string	m_name;		// Name of the service
		std::string	m_type;		// Type of the service
		// Management client pointer
		static ManagementClient	*m_mgtClient;

	private:
		// Security configuration change mutex
		std::mutex	m_mtx_config;
		// Authentication is enabled for API endpoints
		bool		m_authentication_enabled;
		// Security configuration
		ConfigCategory	m_security;
		// Service ACL
		ACL		m_service_acl;
		std::thread	*m_refreshThread;	// Bearer token refresh thread
		bool		m_refreshRunning;	// Flag used to stop the refresh thread
};
#endif

================================================
FILE: C/services/common/include/south_python_plugin_handle.h
================================================
#ifndef _SOUTH_PYTHON_PLUGIN_HANDLE_H
#define _SOUTH_PYTHON_PLUGIN_HANDLE_H
/*
 * Fledge plugin handle related
 *
 * Copyright (c) 2019 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <logger.h>
#include <vector>
#include <sstream>
#include <dlfcn.h>
#include <python_plugin_handle.h>

/**
 * The SouthPythonPluginHandle class is used to represent an interface to
 * a South plugin that is available as a python script
 */
class SouthPythonPluginHandle : public PythonPluginHandle
{
	public:
		SouthPythonPluginHandle(const char *name, const char *path);
};
#endif

================================================
FILE: C/services/common/management_api.cpp
================================================
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <management_api.h>
#include <config_handler.h>
#include <rapidjson/document.h>
#include <logger.h>
#include <time.h>
#include <sstream>

using namespace std;
using namespace rapidjson;
using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>;

// Singleton instance of the management API, set by the constructor
ManagementApi *ManagementApi::m_instance = 0;

/**
 * Wrapper for ping method
 *
 * @param response	The HTTP response
 * @param request	The HTTP request
 */
void pingWrapper(shared_ptr<HttpServer::Response> response,
		 shared_ptr<HttpServer::Request> request)
{
	ManagementApi *api = ManagementApi::getInstance();
	api->ping(response, request);
}

/**
 * Wrapper for shutdown method
 *
 * @param response	The HTTP response
 * @param request	The HTTP request
 */
void shutdownWrapper(shared_ptr<HttpServer::Response> response,
		     shared_ptr<HttpServer::Request> request)
{
	ManagementApi *api = ManagementApi::getInstance();
	api->shutdown(response, request);
}

/**
 * Wrapper for config change method
 *
 * @param response	The HTTP response
 * @param request	The HTTP request
 */
void configChangeWrapper(shared_ptr<HttpServer::Response> response,
			 shared_ptr<HttpServer::Request> request)
{
	ManagementApi *api = ManagementApi::getInstance();
	api->configChange(response, request);
}

/**
 * Wrapper for config child create method
 *
 * @param response	The HTTP response
 * @param request	The HTTP request
 */
void configChildCreateWrapper(shared_ptr<HttpServer::Response> response,
			      shared_ptr<HttpServer::Request> request)
{
	ManagementApi *api = ManagementApi::getInstance();
	api->configChildCreate(response, request);
}

/**
 * Wrapper for config child delete method
 *
 * @param response	The HTTP response
 * @param request	The HTTP request
 */
void configChildDeleteWrapper(shared_ptr<HttpServer::Response> response,
			      shared_ptr<HttpServer::Request> request)
{
	ManagementApi *api = ManagementApi::getInstance();
	api->configChildDelete(response, request);
}

/**
 * Wrapper for
security change method * * @param response The HTTP response * @param request The HTTP request */ void securityChangeWrapper(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ManagementApi *api = ManagementApi::getInstance(); api->securityChange(response, request); } /** * Construct a microservices management API manager class * * @param name The service name * @param port The management API port */ ManagementApi::ManagementApi(const string& name, const unsigned short port) : m_name(name) { m_server = new HttpServer(); m_logger = Logger::getLogger(); m_server->config.port = port; m_startTime = time(0); m_statsProvider = 0; m_server->resource[PING]["GET"] = pingWrapper; m_server->resource[SERVICE_SHUTDOWN]["POST"] = shutdownWrapper; m_server->resource[CONFIG_CHANGE]["POST"] = configChangeWrapper; m_server->resource[CONFIG_CHILD_CREATE]["POST"] = configChildCreateWrapper; m_server->resource[CONFIG_CHILD_DELETE]["DELETE"] = configChildDeleteWrapper; m_server->resource[SECURITY_CHANGE]["PUT"] = securityChangeWrapper; m_instance = this; m_logger->info("Starting management api on port %d.", port); } /** * Start HTTP server for management API */ static void startService() { ManagementApi::getInstance()->startServer(); } void ManagementApi::start() { m_thread = new thread(startService); } void ManagementApi::startServer() { m_server->start(); } void ManagementApi::stop() { this->stopServer(); } void ManagementApi::stopServer() { m_server->stop(); m_thread->join(); } /** * Return the signleton instance of the management interface * * Note if one has not been explicitly created then this will * return 0. 
 */
ManagementApi *ManagementApi::getInstance()
{
	return m_instance;
}

/**
 * Management API destructor
 */
ManagementApi::~ManagementApi()
{
	delete m_server;
	delete m_thread;
}

/**
 * Register a statistics provider
 *
 * @param statsProvider	The statistics provider for the service
 */
void ManagementApi::registerStats(JSONProvider *statsProvider)
{
	m_statsProvider = statsProvider;
}

/**
 * Register a generic provider. There can be multiple providers for
 * a single service
 *
 * @param provider	The JSON status provider to add
 */
void ManagementApi::registerProvider(JSONProvider *provider)
{
	m_providers.emplace_back(provider);
}

/**
 * Received a ping request, construct a reply and return to caller
 *
 * @param response	The HTTP response
 * @param request	The HTTP request
 */
void ManagementApi::ping(shared_ptr<HttpServer::Response> response,
			 shared_ptr<HttpServer::Request> request)
{
ostringstream	convert;
string		responsePayload;

	(void)request; // Unused argument
	// Build the JSON ping reply: uptime, name, any registered
	// provider output and optionally the statistics provider output
	convert << "{ \"uptime\" : " << time(0) - m_startTime << ",";
	convert << "\"name\" : \"" << m_name << "\"";
	for (auto& p : m_providers)
	{
		string data;
		p->asJSON(data);
		convert << ", " << data;
	}
	if (m_statsProvider)
	{
		string stats;
		m_statsProvider->asJSON(stats);
		convert << ", \"statistics\" : " << stats;
	}
	convert << " }";
	responsePayload = convert.str();
	respond(response, responsePayload);
}

/**
 * Received a shutdown request, construct a reply and return to caller
 *
 * @param response	The HTTP response
 * @param request	The HTTP request
 */
void ManagementApi::shutdown(shared_ptr<HttpServer::Response> response,
			     shared_ptr<HttpServer::Request> request)
{
ostringstream	convert;
string		responsePayload;

	(void)request; // Unused argument
	// Delegate the shutdown to the registered service handler
	m_serviceHandler->shutdown();
	convert << "{ \"message\" : \"Shutdown in progress\" }";
	responsePayload = convert.str();
	respond(response, responsePayload);
}

/**
 * Received a config change request, construct a reply and return to caller
 *
 * @param response	The HTTP response
 * @param request
The HTTP request */ void ManagementApi::configChange(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ostringstream convert; string responsePayload; string payload; try { payload = request->content.string(); ConfigCategoryChange conf(payload); ConfigHandler *handler = ConfigHandler::getInstance(NULL); handler->configChange(conf.getName(), conf.itemsToJSON(true)); convert << "{ \"message\" : \"Config change accepted\" }"; } catch(const std::exception& e) { convert << "{ \"exception\" : \"" << e.what() << "\" }"; } catch(...) { convert << "{ \"exception\" : \"generic\" }"; } responsePayload = convert.str(); respond(response, responsePayload); } /** * Received a children deletion request, construct a reply and return to caller * * @param response The HTTP response * @param request The HTTP request */ void ManagementApi::configChildDelete(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ostringstream convert; string responsePayload; string category, items, payload, parent_category; payload = request->content.string(); ConfigCategoryChange conf(payload); ConfigHandler *handler = ConfigHandler::getInstance(NULL); parent_category = conf.getmParentName(); category = conf.getName(); items = conf.itemsToJSON(true); Logger::getLogger()->debug("%s - parent_category:%s: child_category:%s: items:%s: ", __FUNCTION__ , parent_category.c_str() , category.c_str() , items.c_str() ); handler->configChildDelete(parent_category, category); convert << "{ \"message\" ; \"Config child category change accepted\" }"; responsePayload = convert.str(); respond(response, responsePayload); } /** * Received a children creation request, construct a reply and return to caller * * @param response The HTTP response * @param request The HTTP request */ void ManagementApi::configChildCreate(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ostringstream convert; string responsePayload; string 
category, items, payload, parent_category; payload = request->content.string(); ConfigCategoryChange conf(payload); ConfigHandler *handler = ConfigHandler::getInstance(NULL); parent_category = conf.getmParentName(); category = conf.getName(); items = conf.itemsToJSON(true); Logger::getLogger()->debug("%s - parent_category:%s: child_category:%s: items:%s: ", __FUNCTION__ , parent_category.c_str() , category.c_str() , items.c_str() ); handler->configChildCreate(parent_category, category, items); convert << "{ \"message\" ; \"Config child category change accepted\" }"; responsePayload = convert.str(); respond(response, responsePayload); } /** * HTTP response method */ void ManagementApi::respond(shared_ptr<HttpServer::Response> response, const string& payload) { *response << "HTTP/1.1 200 OK\r\nContent-Length: " << payload.length() << "\r\n" << "Content-type: application/json\r\n\r\n" << payload; } /** * Received a security change request, construct a reply and return to caller * * @param response The HTTP response * @param request The HTTP request */ void ManagementApi::securityChange(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string payload = request->content.string(); Logger::getLogger()->debug("Received securityChange: %s", payload.c_str()); ostringstream convert; string responsePayload; // Call server securityChange method m_serviceHandler->securityChange(payload); convert << "{ \"message\" : \"Security change accepted\" }"; responsePayload = convert.str(); respond(response, responsePayload); } ================================================ FILE: C/services/common/north_python_plugin_handle.cpp ================================================ /* * Fledge plugin handle related * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <config_category.h> #include <reading.h> #include <logger.h> #include <north_python_plugin_handle.h> #define 
PYTHON_PLUGIN_INTF_LIB "libnorth-plugin-python-interface.so" #define PRINT_FUNC Logger::getLogger()->info("%s:%d", __FUNCTION__, __LINE__); typedef PLUGIN_INFORMATION *(*pluginInitFn)(const char *pluginName, const char *path); using namespace std; /** * Constructor for NorthPythonPluginHandle * - Load python interface library and initialize the interface * * @param pluginName The Python plugin name to load * @param pluginPathName The Python plugin path */ NorthPythonPluginHandle::NorthPythonPluginHandle(const char *pluginName, const char *pluginPathName) : PythonPluginHandle(pluginName, pluginPathName) { // expecting this lib to be present in LD_LIBRARY_PATH: //same dir as where lib-services-common.so is present string libPath = PYTHON_PLUGIN_INTF_LIB; m_hndl = dlopen(libPath.c_str(), RTLD_NOW | RTLD_GLOBAL); if (!m_hndl) { Logger::getLogger()->error("PythonPluginHandle c'tor: dlopen failed for library '%s' : %s", libPath.c_str(), dlerror()); return; } pluginInitFn initFn = (pluginInitFn) dlsym(m_hndl, "PluginInterfaceInit"); if (initFn == NULL) { // Unable to find PluginInterfaceInit entry point Logger::getLogger()->error("Plugin library %s does not support %s function : %s", libPath.c_str(), "PluginInterfaceInit", dlerror()); dlclose(m_hndl); m_hndl = NULL; return; } // Initialise embedded Python and the interface void *ref = initFn(pluginName, pluginPathName); if (ref == NULL) { fprintf(stderr, "Plugin library %s : PluginInterfaceInit returned failure", libPath.c_str()); dlclose(m_hndl); m_hndl = NULL; return; } // Set type m_type = PLUGIN_TYPE_NORTH; } ================================================ FILE: C/services/common/notification_python_plugin_handle.cpp ================================================ /* * Fledge Notification Python plugin handle related * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <config_category.h> #include <reading.h> #include <logger.h> #include 
<notification_python_plugin_handle.h> #define PYTHON_PLUGIN_INTF_LIB "libnotification-plugin-python-interface.so" #define PRINT_FUNC Logger::getLogger()->info("%s:%d", __FUNCTION__, __LINE__); typedef PLUGIN_INFORMATION *(*pluginInitFn)(const char *pluginName, const char *path); using namespace std; /** * Constructor for NotificationPythonPluginHandle * - Load python interface library and initialize the interface */ NotificationPythonPluginHandle::NotificationPythonPluginHandle(const char *pluginName, const char *pluginPathName) : PythonPluginHandle(pluginName, pluginPathName) { // expecting this lib to be present in LD_LIBRARY_PATH: //same dir as where lib-services-common.so is present m_interfaceObjName = PYTHON_PLUGIN_INTF_LIB; // Open interface library object m_hndl = dlopen(m_interfaceObjName.c_str(), RTLD_NOW | RTLD_GLOBAL); if (!m_hndl) { Logger::getLogger()->error("NotificationPythonPluginHandle c'tor: dlopen failed for library '%s' : %s", m_interfaceObjName.c_str(), dlerror()); return; } pluginInitFn initFn = (pluginInitFn) dlsym(m_hndl, "PluginInterfaceInit"); if (initFn == NULL) { // Unable to find PluginInterfaceInit entry point Logger::getLogger()->error("Plugin library %s does not support %s function : %s", m_interfaceObjName.c_str(), "PluginInterfaceInit", dlerror()); dlclose(m_hndl); m_hndl = NULL; return; } // Initialise Python plugin object void *ref = initFn(pluginName, pluginPathName); if (ref == NULL) { fprintf(stderr, "Plugin library %s : PluginInterfaceInit returned failure", m_interfaceObjName.c_str()); dlclose(m_hndl); m_hndl = NULL; return; } // Set type m_type = strstr(pluginPathName, PLUGIN_TYPE_NOTIFICATION_RULE) != NULL ? 
PLUGIN_TYPE_NOTIFICATION_RULE : PLUGIN_TYPE_NOTIFICATION_DELIVERY; } ================================================ FILE: C/services/common/perfmonitor.cpp ================================================ /* * Fledge storage service client * * Copyright (c) 2023 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <perfmonitors.h> #include <chrono> using namespace std; /** * Constructor for an individual performance monitor * * @param name The name of the performance monitor */ PerfMon::PerfMon(const string& name) : m_name(name), m_samples(0) { } /** * Collect a new value for the performance monitor * * @param value The new value */ void PerfMon::addValue(long value) { lock_guard<mutex> guard(m_mutex); if (m_samples) { if (value < m_min) m_min = value; else if (value > m_max) m_max = value; m_average = ((m_samples * m_average) + value) / (m_samples + 1); m_samples++; } else { m_min = value; m_max = value; m_average = value; m_samples = 1; } } /** * Return the performance values to insert * */ int PerfMon::getValues(InsertValues& values) { lock_guard<mutex> guard(m_mutex); if (m_samples == 0) return 0; values.push_back(InsertValue("minimum", m_min)); values.push_back(InsertValue("maximum", m_max)); values.push_back(InsertValue("average", m_average)); values.push_back(InsertValue("samples", m_samples)); m_min = 0; m_max = 0; m_average = 0; int samples = m_samples; m_samples = 0; return samples; } /** * Constructor for the performance monitors * * @param service The name of the service * @param storage Point to the storage client class for the service */ PerformanceMonitor::PerformanceMonitor(const string& service, StorageClient *storage) : m_service(service), m_storage(storage), m_collecting(false), m_thread(NULL) { } /** * Destructor for the performance monitor */ PerformanceMonitor::~PerformanceMonitor() { if (m_collecting) { setCollecting(false); } // Write thread has now been stopped or // was never 
running for (const auto& it : m_monitors) { string name = it.first; PerfMon *mon = it.second; delete mon; } } /** * Monitor thread entry point * * @param perfMon The perforamnce monitore class */ static void monitorThread(PerformanceMonitor *perfMon) { perfMon->writeThread(); } /** * Set the collection state of the performance monitors * * @param state The required collection state */ void PerformanceMonitor::setCollecting(bool state) { m_collecting = state; if (m_collecting && m_thread == NULL) { // Start the thread to write the monitors to the database m_thread = new thread(monitorThread, this); } else if (m_collecting == false && m_thread) { // Stop the thread to write the monitors to the database m_cv.notify_all(); m_thread->join(); delete m_thread; m_thread = NULL; } } /** * Add a new value to the named performance monitor * * @param name The name of the performance monitor * @param value The value to add */ void PerformanceMonitor::doCollection(const string& name, long value) { PerfMon *mon; auto it = m_monitors.find(name); if (it == m_monitors.end()) { // Create a new monitor mon = new PerfMon(name); m_monitors[name] = mon; } else { mon = it->second; } mon->addValue(value); } /** * The thread that runs to write database values */ void PerformanceMonitor::writeThread() { while (m_collecting) { unique_lock<mutex> lk(m_mutex); m_cv.wait_for(lk, chrono::seconds(60)); if (m_collecting) { // Write to the database for (const auto& it : m_monitors) { string name = it.first; PerfMon *mon = it.second; InsertValues values; if (mon->getValues(values) > 0) { values.push_back(InsertValue("service", m_service)); values.push_back(InsertValue("monitor", name)); // Inser data writeData("monitors", values); } } } } } ================================================ FILE: C/services/common/plugin.cpp ================================================ /* * Fledge storage service. 
* * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <plugin.h> #include <plugin_manager.h> Plugin::Plugin(PLUGIN_HANDLE handle) { this->handle = handle; this->manager = PluginManager::getInstance(); this->info = this->manager->getInfo(handle); } Plugin::~Plugin() { } const PLUGIN_INFORMATION *Plugin::getInfo() { return this->info; } ================================================ FILE: C/services/common/plugin_manager.cpp ================================================ /* * Fledge plugin manager. * * Copyright (c) 2017, 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <cstdio> #include <dlfcn.h> #include <string.h> #include <iostream> #include <fstream> #include <unistd.h> #include <plugin_manager.h> #include <binary_plugin_handle.h> #include <south_python_plugin_handle.h> #include <north_python_plugin_handle.h> #include <notification_python_plugin_handle.h> #include <filter_python_plugin_handle.h> #include <dirent.h> #include <sys/param.h> #include "rapidjson/document.h" #include "rapidjson/writer.h" #include "rapidjson/stringbuffer.h" #include "rapidjson/error/error.h" #include "rapidjson/error/en.h" #include <algorithm> #include <config_category.h> #include <string_utils.h> using namespace std; using namespace rapidjson; PluginManager *PluginManager::instance = 0; typedef PLUGIN_INFORMATION *(*func_t)(); /** * PluginManager Singleton implementation */ PluginManager *PluginManager::getInstance() { if (!instance) instance = new PluginManager(); return instance; } /** * Plugin Manager Constructor */ PluginManager::PluginManager() { logger = Logger::getLogger(); m_pluginType = PLUGIN_TYPE_ID_OTHER; } /** * Update plugin info by merging the JSON plugin config over base plugin config * * @param info The plugin info structure * @param json_plugin_name JSON plugin name * @param json_plugin_defaults JSON plugin defaults dict * @param 
json_plugin_description JSON plugin description */ void updateJsonPluginConfig(PLUGIN_INFORMATION *info, string json_plugin_name, string json_plugin_defaults, string json_plugin_description) { Logger *logger = Logger::getLogger(); logger->debug("Loading base plugin for JSON plugin, so updating plugin_info structure loaded from base plugin"); char *nameStr = new char [json_plugin_name.length()+1]; std::strcpy (nameStr, json_plugin_name.c_str()); info->name = nameStr; // Update json_plugin_description in plugin->description Document doc; doc.Parse(json_plugin_defaults.c_str()); if (doc.HasParseError()) { logger->error("Parse error in plugin '%s' defaults: %s at %d '%s'", json_plugin_name.c_str(), GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset(), StringAround(json_plugin_defaults, (unsigned)doc.GetErrorOffset())); return; } Document docBase; docBase.Parse(info->config); if (docBase.HasParseError()) { logger->error("Parse error in plugin '%s' information defaults: %s at %d '%s'", json_plugin_name.c_str(), GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset(), StringAround(info->config, (unsigned)doc.GetErrorOffset())); return; } static const char* kTypeNames[] = { "Null", "False", "True", "Object", "Array", "String", "Number" }; DefaultConfigCategory basePluginCc("base", string(info->config)); logger->debug("Original basePluginCc=%s", basePluginCc.toJSON().c_str()); // Iterate over overlay config and find same item in base config and update their default from overlay to base config for (auto& m : doc.GetObject()) { rapidjson::StringBuffer sb; rapidjson::Writer<rapidjson::StringBuffer> writer( sb ); m.value.Accept( writer ); string s = sb.GetString(); //logger->debug("m.value.type()=%s, m.value.GetString()=%s", kTypeNames[m.value.GetType()], s.c_str()); // find item with name 'm.name.GetString()' in base config if (!docBase.HasMember(m.name.GetString())) { logger->warn("Item with name '%s' missing from base config, ignoring it", 
m.name.GetString()); continue; } else { string baseItemValue = basePluginCc.getDefault(m.name.GetString()); //logger->debug("Original baseItemValue=%s", baseItemValue.c_str()); Value::MemberIterator baseItemDefault = docBase[m.name.GetString()].FindMember("default"); Value::MemberIterator overlayItemDefault = m.value.FindMember("default"); if(baseItemDefault == docBase.MemberEnd() || overlayItemDefault == m.value.MemberEnd()) { logger->warn("Default value for item with name %s missing from base config, ignoring it", m.name.GetString()); continue; } else { //logger->debug("baseItemDefault: name=%s, type=%s, value=%s", // baseItemDefault->name.GetString(), kTypeNames[baseItemDefault->value.GetType()], baseItemDefault->value.GetString()); string s; if (overlayItemDefault->value.IsObject()) { rapidjson::StringBuffer sb; rapidjson::Writer<rapidjson::StringBuffer> writer( sb ); overlayItemDefault->value.Accept( writer ); s = sb.GetString(); } else if (overlayItemDefault->value.IsString()) { s = overlayItemDefault->value.GetString(); } else if (overlayItemDefault->value.IsDouble()) { s = to_string(overlayItemDefault->value.GetDouble()); } else if (overlayItemDefault->value.IsNumber()) { s = to_string(overlayItemDefault->value.GetInt()); } else if (overlayItemDefault->value.IsBool()) { s = overlayItemDefault->value.GetBool() ? 
"true" : "false"; } else { logger->error("Unable to handle overlayItemDefault: name=%s, type=%d", overlayItemDefault->name.GetString(), overlayItemDefault->value.GetType()); } //logger->debug("overlayItemDefault: name=%s, type=%s, value=%s", // overlayItemDefault->name.GetString(), kTypeNames[overlayItemDefault->value.GetType()], s.c_str()); basePluginCc.setDefault(m.name.GetString(), s); //logger->debug("Updated basePluginCc=%s", basePluginCc.toJSON().c_str()); //logger->printLongString(basePluginCc.itemsToJSON()); } } } // Update info->config char *confStr = new char [basePluginCc.itemsToJSON().length()+1]; std::strcpy (confStr, basePluginCc.itemsToJSON().c_str()); info->config = confStr; //logger->debug("\"defaults\" updated:"); //logger->printLongString(info->config); // Update plugin name and description Document doc2; doc2.Parse(info->config); if (doc2.HasParseError()) { logger->error("Parse error in information returned from plugin: %s at %d '%s'", GetParseError_En(doc2.GetParseError()), (unsigned)doc2.GetErrorOffset(), StringAround(info->config, (unsigned)doc2.GetErrorOffset())); } if (doc2.HasMember("plugin")) { Value::MemberIterator itemValueIter = doc2["plugin"].FindMember("default"); //logger->debug("plugin->default=%s", itemValueIter->value.GetString()); itemValueIter->value.SetString(json_plugin_name.c_str(), doc2.GetAllocator()); Value::MemberIterator itemValueIter2 = doc2["plugin"].FindMember("description"); //logger->debug("plugin->description=%s", itemValueIter2->value.GetString()); itemValueIter2->value.SetString(json_plugin_description.c_str(), doc2.GetAllocator()); } StringBuffer buf; Writer<StringBuffer> writer (buf); doc2.Accept (writer); char *confStr2 = new char [string(buf.GetString()).length()+1]; std::strcpy (confStr2, buf.GetString()); info->config = confStr2; delete[] confStr; logger->debug("Fields updated based on JSON config overlay:"); logger->printLongString(info->config); } /** * Find a specific plugin in the directories listed in 
FLEDGE_PLUGIN_PATH * * @param name The plugin name * @param _type The plugin type string * @param _plugin_path Value of FLEDGE_PLUGIN_PATH environment variable * @param type The plugin type * @return string The absolute path of plugin */ string PluginManager::findPlugin(string name, string _type, string _plugin_path, PLUGIN_TYPE type) { if (type != BINARY_PLUGIN && type != PYTHON_PLUGIN && type != JSON_PLUGIN) { return ""; } stringstream plugin_path(_plugin_path); string temp; // Tokenizing w.r.t. semicolon ';' while(getline(plugin_path, temp, ';')) { string path = temp+"/"+_type+"/"+name+"/"; switch(type) { case BINARY_PLUGIN: path += "lib"+name+".so"; break; case PYTHON_PLUGIN: path += name+".py"; break; case JSON_PLUGIN: path += name+".json"; break; } if (access(path.c_str(), F_OK) == 0) { Logger::getLogger()->debug("Found plugin @ %s", path.c_str()); return path; } } Logger::getLogger()->debug("Didn't find plugin : name=%s, _type=%s, _plugin_path=%s", name.c_str(), _type.c_str(), _plugin_path.c_str()); return ""; } /** * Set Plugin Type */ void PluginManager::setPluginType(tPluginType type) { m_pluginType = type; } /** * Load a given plugin */ PLUGIN_HANDLE PluginManager::loadPlugin(const string& _name, const string& type) { PluginHandle *pluginHandle = NULL; PLUGIN_HANDLE hndl; char buf[MAXPATHLEN]; string json_plugin_name, json_base_plugin_name, json_plugin_defaults, json_plugin_description; bool json_plugin = false; string name(_name); if (pluginNames.find(name) != pluginNames.end()) { if (type.compare(pluginTypes.find(name)->second)) { logger->error("Plugin %s is already loaded but not the expected type %s\n", name.c_str(), type.c_str()); return NULL; } return pluginNames[name]; } const char *home = getenv("FLEDGE_ROOT"); const char *plugin_path = getenv("FLEDGE_PLUGIN_PATH"); string paths(""); if (home) { paths += string(home)+"/plugins"; paths += ";"+string(home)+"/python/fledge/plugins"; } if (plugin_path) paths += (home ? 
";" : "")+string(plugin_path); /* * Find and try to load the plugin that is described via a JSON file */ string path = findPlugin(name, type, paths, JSON_PLUGIN); strncpy(buf, path.c_str(), sizeof(buf)); if (buf[0] && access(buf, F_OK|R_OK) == 0) { // read config from JSON file ifstream ifs(buf, ios::in); std::stringstream sstr; sstr << ifs.rdbuf(); string json=sstr.str(); json.erase(remove(json.begin(), json.end(), '\t'), json.end()); json.erase(remove(json.begin(), json.end(), '\n'), json.end()); // parse JSON document Document doc; doc.Parse(json.c_str()); if (doc.HasParseError()) { Logger::getLogger()->error("Parse error for JSON plugin config in '%s': %s at %d", name.c_str(), GetParseError_En(doc.GetParseError()), (unsigned)doc.GetErrorOffset()); return NULL; } if (!(doc.HasMember("name") && doc["name"].IsString() && doc.HasMember("defaults") && doc["defaults"].IsObject() && doc.HasMember("connection") && doc["connection"].IsString())) { Logger::getLogger()->error("JSON config for plugin @ '%s' is missing/misconfigured, exiting...", buf); return NULL; } json_plugin_name = doc["name"].GetString(); json_base_plugin_name = doc["connection"].GetString(); if (doc.HasMember("description") && doc["description"].IsString()) { json_plugin_description = doc["description"].GetString(); } if (doc["defaults"].IsObject()) { rapidjson::StringBuffer sb; rapidjson::Writer<rapidjson::StringBuffer> writer( sb ); doc["defaults"].Accept( writer ); json_plugin_defaults = sb.GetString(); } // set plugin name so that base plugin can be loaded next json_plugin = true; name = json_base_plugin_name; logger->debug("json_plugin=%s, json_plugin_name=%s, json_base_plugin_name=%s, json_plugin_description=%s, json_plugin_defaults=%s", json_plugin?"true":"false", json_plugin_name.c_str(), json_base_plugin_name.c_str(), json_plugin_description.c_str(), json_plugin_defaults.c_str()); } /* * Find and try to load the dynamic library that is the plugin */ path = findPlugin(name, type, paths, 
BINARY_PLUGIN); strncpy(buf, path.c_str(), sizeof(buf)); if (buf[0] && access(buf, F_OK|R_OK) == 0) { if (m_pluginType == PLUGIN_TYPE_ID_STORAGE) { pluginHandle = new BinaryPluginHandle(name.c_str(), buf, PLUGIN_TYPE_ID_STORAGE); } else { pluginHandle = new BinaryPluginHandle(name.c_str(), buf); } hndl = pluginHandle->getHandle(); if (hndl != NULL) { func_t infoEntry = (func_t)pluginHandle->GetInfo(); if (infoEntry == NULL) { // Unable to find plugin_info entry point logger->error("C plugin %s does not support plugin_info entry point.\n", name.c_str()); delete pluginHandle; return NULL; } PLUGIN_INFORMATION *info = (PLUGIN_INFORMATION *)(*infoEntry)(); logger->debug("%s:%d: name=%s, type=%s, default config=%s", __FUNCTION__, __LINE__, info->name, info->type, info->config); if (strcmp(info->type, type.c_str()) != 0) { // Log error, incorrect plugin type logger->error("C plugin %s is not of the expected type %s, it is of type %s.\n", name.c_str(), type.c_str(), info->type); delete pluginHandle; return NULL; } if (json_plugin) { updateJsonPluginConfig(info, json_plugin_name, json_plugin_defaults, json_plugin_description); } plugins.push_back(pluginHandle); pluginNames[name] = hndl; pluginTypes[name] = type; pluginImplTypes[hndl] = BINARY_PLUGIN; pluginInfo[hndl] = info; pluginHandleMap[hndl] = pluginHandle; logger->debug("%s:%d: Added entry in pluginHandleMap={%p, %p}", __FUNCTION__, __LINE__, hndl, pluginHandle); } else { logger->error("PluginManager: Failed to load C plugin %s in %s: %s.", name.c_str(), buf, dlerror()); } return hndl; } // look for and load python plugin with given name path = findPlugin(name, type, paths, PYTHON_PLUGIN); strncpy(buf, path.c_str(), sizeof(buf)); if (buf[0] && access(buf, F_OK|R_OK) == 0) { // is it Notification Rule Python plugin ? 
if (type.compare(PLUGIN_TYPE_NOTIFICATION_RULE) == 0 || type.compare(PLUGIN_TYPE_NOTIFICATION_DELIVERY) == 0) { pluginHandle = new NotificationPythonPluginHandle(name.c_str(), buf); } else if (type.compare(PLUGIN_TYPE_FILTER) == 0) { pluginHandle = new FilterPythonPluginHandle(name.c_str(), buf); } else if (type.compare(PLUGIN_TYPE_NORTH) == 0) { pluginHandle = new NorthPythonPluginHandle(name.c_str(), buf); } else { pluginHandle = new SouthPythonPluginHandle(name.c_str(), buf); } hndl = pluginHandle->getHandle(); if (hndl != NULL) { func_t infoEntry = (func_t)pluginHandle->GetInfo(); if (infoEntry == NULL) { // Unable to find plugin_info entry point logger->error("Python plugin %s does not support plugin_info entry point.\n", name.c_str()); delete pluginHandle; return NULL; } PLUGIN_INFORMATION *info = (PLUGIN_INFORMATION *)(*infoEntry)(); if (!info) { // Unable to get data from plugin_info entry point logger->error("Python plugin %s cannot get data from plugin_info entry point.\n", name.c_str()); delete pluginHandle; return NULL; } if (strcmp(info->type, type.c_str()) != 0) { // Log error, incorrect plugin type logger->error("C plugin %s is not of the expected type %s, it is of type %s.\n", name.c_str(), type.c_str(), info->type); delete pluginHandle; return NULL; } if (json_plugin) { updateJsonPluginConfig(info, json_plugin_name, json_plugin_defaults, json_plugin_description); } plugins.push_back(pluginHandle); pluginNames[name] = hndl; pluginTypes[name] = type; pluginImplTypes[hndl] = PYTHON_PLUGIN; pluginInfo[hndl] = info; pluginHandleMap[hndl] = pluginHandle; } else { logger->error("PluginManager: Failed to load python plugin %s in %s", name.c_str(), buf); } return hndl; } if (json_plugin) // if base plugin had been found, this function would have returned already { logger->error("PluginManager: Could not load base plugin '%s' for JSON plugin '%s'", json_base_plugin_name.c_str(), json_plugin_name.c_str()); return NULL; } logger->error("PluginManager: Failed 
to load plugin '%s' as any of the recognised types. Check that the plugin exists and the plugin name and installation directory match", name.c_str());
return NULL;
}

/**
 * Find a loaded plugin by name.
 *
 * @param name	The plugin name used when the plugin was loaded
 * @return The plugin handle or NULL if no plugin with that name is loaded
 */
PLUGIN_HANDLE PluginManager::findPluginByName(const string& name)
{
if (pluginNames.find(name) == pluginNames.end())
{
return NULL;
}
return pluginNames.find(name)->second;
}

/**
 * Find a loaded plugin by type
 *
 * NOTE(review): this looks the type string up in pluginNames, the map
 * keyed by plugin NAME - it only works when name and type coincide.
 * Confirm whether a pluginTypes based lookup was intended.
 */
PLUGIN_HANDLE PluginManager::findPluginByType(const string& type)
{
if (pluginNames.find(type) == pluginNames.end())
{
return NULL;
}
return pluginNames.find(type)->second;
}

/**
 * Return the information for a named plugin
 *
 * @param handle	The handle of a loaded plugin
 * @return The PLUGIN_INFORMATION of the plugin or NULL if unknown
 */
PLUGIN_INFORMATION *PluginManager::getInfo(const PLUGIN_HANDLE handle)
{
if (pluginInfo.find(handle) == pluginInfo.end())
{
return NULL;
}
return pluginInfo.find(handle)->second;
}

/**
 * Resolve a symbol within the plugin
 *
 * @param handle	The handle of a loaded plugin
 * @param symbol	The symbol name to resolve
 * @return The resolved symbol or NULL if the handle is unknown
 */
PLUGIN_HANDLE PluginManager::resolveSymbol(PLUGIN_HANDLE handle, const string& symbol)
{
if (pluginHandleMap.find(handle) == pluginHandleMap.end())
{
logger->warn("%s:%d: Cannot find PLUGIN_HANDLE in pluginHandleMap: returning NULL", __FUNCTION__, __LINE__);
return NULL;
}
return pluginHandleMap.find(handle)->second->ResolveSymbol(symbol.c_str());
}

/**
 * Get the installed plugins in the given plugin type
 * subdirectory of "plugins" under FLEDGE_ROOT
 * Plugin type is one of:
 * south, north, filter, notificationRule, notificationDelivery
 *
 * @param type		The plugin type
 * @param plugins	The output plugin list name to fill
 */
void PluginManager::getInstalledPlugins(const string& type, list<string>& plugins)
{
char *home = getenv("FLEDGE_ROOT");
char *plugin_path = getenv("FLEDGE_PLUGIN_PATH");
string paths("");
if (home)
{
// Binary C plugins
paths += string(home)+"/plugins";
// Python Plugins
paths += ";"+string(home)+"/python/fledge/plugins";
}
if (plugin_path)
{
paths += (home?";":"")+string(plugin_path);
}
stringstream _paths(paths);
string temp;
// Tokenize w.r.t. semicolon ';'
while(getline(_paths, temp, ';'))
{
struct dirent *entry;
DIR *dp;
string path = temp + "/" + type + "/";
// Open the plugins dir/type
dp = opendir(path.c_str());
if (!dp)
{
// Can not open specified dir path
char msg[128];
char* ret = strerror_r(errno, msg, 128);
logger->warn("Can not access plugin directory %s: %s", path.c_str(), ret);
continue;
}
/**
 * Get all sub directory names in path:
 *   path = plugins/filter/
 *     delta
 *     scale
 * Plugin filename is libdelta.so, libscale.so
 * Plugin name is the subdirecory name in path
 * Skip directory starting with '_' or
 * with name 'common'
 */
while ((entry = readdir(dp)))
{
if (strcmp (entry->d_name, "..") != 0 &&
strcmp (entry->d_name, ".") != 0 &&
strcmp (entry->d_name, "common") != 0 &&
entry->d_name[0] != '_')
{
// Only directories are plugin candidates
struct stat stbuf;
bool is_dir(false);
if (stat((path + entry->d_name).c_str(), &stbuf) != 0)
{
continue;
}
is_dir = S_ISDIR(stbuf.st_mode);
if (!is_dir)
{
continue;
}
/* check for duplicate names to avoid multiple loadPlugin calls */
bool is_duplicate = false;
for (const auto& loadedPlugin : plugins)
{
if (loadedPlugin == entry->d_name)
{
is_duplicate = true;
break;
}
}
if (!is_duplicate)
{
// Load plugin, given its name: the directory name
loadPlugin(entry->d_name, type);
// Add name to output list
plugins.push_back(entry->d_name);
}
}
}
closedir(dp);
}
}

/**
 * Return a list of plugins matching the criteria
 * of plugin type and plugin flags
 *
 * @param type	The plugin type to match
 * @param flags	A bitmask of flags to match
 * @return vector<string>	A list of matching plugin names
 */
std::vector<string> PluginManager::getPluginsByFlags(const std::string& type, unsigned int flags)
{
// Plugins matching type and flag bits
std::vector<std::string> matchingPlugins;
// Get list of installed plugins of given type
std::list<std::string> plugins;
getInstalledPlugins(type, plugins);
/* Iterate list of installed plugins and match plugin 'options' with passed plugin flags */
for (auto &pName: plugins)
{
//
Fetch loaded plugin handle
auto pluginHandle = pluginNames.find(pName);
unsigned int pluginOptions = 0;
if (pluginHandle != pluginNames.end())
{
pluginOptions = getInfo(pluginHandle->second)->options;
}
// Match bit fields corresponding to loaded plugins
if ((flags & pluginOptions) == flags)
{
matchingPlugins.push_back(pName);
}
}
return matchingPlugins;
}
================================================
FILE: C/services/common/service_security.cpp
================================================
#include <config_category.h>
#include <string>
#include <management_client.h>
#include <service_handler.h>
#include <config_handler.h>
#include <server_http.hpp>
#include <rapidjson/error/en.h>
#include <acl.h>

#define TO_STRING(...) DEFER(TO_STRING_)(__VA_ARGS__)
#define DEFER(x) x
#define TO_STRING_(...) #__VA_ARGS__
#define QUOTE(...) TO_STRING(__VA_ARGS__)

// How long before the token 'exp' claim the refresh thread renews it
#define DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION 120

using namespace std;
using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>;

static void bearer_token_refresh_thread(void *data);

/**
 * Initialise m_mgtClient object to NULL
 */
ManagementClient *ServiceAuthHandler::m_mgtClient = NULL;

/**
 * Create "${service}Security" category with empty content
 *
 * @param mgtClient	The management client object
 * @param dryRun	Dryrun so do not register interest in the category
 * @return True on success, False otherwise
 */
bool ServiceAuthHandler::createSecurityCategories(ManagementClient* mgtClient, bool dryRun)
{
string securityCatName = m_name + string("Security");
DefaultConfigCategory defConfigSecurity(securityCatName, string("{}"));
// All services add 'AuthenticatedCaller' item
// Add AuthenticatedCaller item, set to "false"
defConfigSecurity.addItem("AuthenticatedCaller", "Security enable parameter", "boolean",
// For dispatcher set default = true
this->getType() == "Dispatcher" ? "true" : "false", // Default
"false"); // Value
defConfigSecurity.setItemDisplayName("AuthenticatedCaller", "Enable caller authorisation");
defConfigSecurity.addItem("ACL", "Service ACL for " + m_name, "ACL",
"", // Default
""); // Value
defConfigSecurity.setItemDisplayName("ACL", "Service ACL");
defConfigSecurity.setDescription(m_name + " Security");
// Create/Update category name (we pass keep_original_items=true)
mgtClient->addCategory(defConfigSecurity, true);
// Add this service under 'm_name' parent category
vector<string> children1;
children1.push_back(securityCatName);
mgtClient->addChildCategories(m_name, children1);
// Get new or merged category content
m_security = mgtClient->getCategory(m_name + "Security");
this->setInitialAuthenticatedCaller();
// Register for security category content changes
ConfigHandler *configHandler = ConfigHandler::getInstance(mgtClient);
if (configHandler == NULL)
{
Logger::getLogger()->error("Failed to get access to ConfigHandler for %s", m_name.c_str());
return false;
}
if (!dryRun)
{
// Register for content change notification
configHandler->registerCategory(this, m_name + "Security");
}
// Load ACL given the value of 'acl' type item: i.e.
string acl_name = m_security.getValue("ACL");
if (!acl_name.empty())
{
m_service_acl = m_mgtClient->getACL(acl_name);
}
// Start thread for automatic bearer token refresh, before expiration
if (this->getType() != "Southbound" && dryRun == false)
{
m_refreshThread = new thread(bearer_token_refresh_thread, this);
}
return true;
}

/**
 * Update the class objects from security category content update
 *
 * @param category	The service category name
 * @return True on success, False otherwise
 */
bool ServiceAuthHandler::updateSecurityCategory(const string& category)
{
// Lock config
lock_guard<mutex> cfgLock(m_mtx_config);
m_security = ConfigCategory(m_name + "Security", category);
bool acl_set = false;
// Note: as per FOGL-6612
// Only AuthenticatedCaller will be handled in Security category change notification
// ACL update is made via security change service handler
// Check for AuthenticatedCaller main switch
if (m_security.itemExists("AuthenticatedCaller"))
{
// Any value beginning 't' or 'T' is treated as true
string val = m_security.getValue("AuthenticatedCaller");
if (val[0] == 't' || val[0] == 'T')
{
acl_set = true;
}
}
m_authentication_enabled = acl_set;
Logger::getLogger()->debug("updateSecurityCategory called, switch val %d", acl_set);
return acl_set;
}

/**
 * Set initial value of enabled authentication
 */
void ServiceAuthHandler::setInitialAuthenticatedCaller()
{
bool acl_set = false;
if (m_security.itemExists("AuthenticatedCaller"))
{
string val = m_security.getValue("AuthenticatedCaller");
Logger::getLogger()->debug("This service '%s' has AuthenticatedCaller item %s", m_name.c_str(), val.c_str());
// Any value beginning 't' or 'T' is treated as true
if (val[0] == 't' || val[0] == 'T')
{
acl_set = true;
}
this->setAuthenticatedCaller(acl_set);
}
}

/**
 * Set enabled authentication value
 *
 * @param enabled	The enable/disable flag to set
 */
void ServiceAuthHandler::setAuthenticatedCaller(bool enabled)
{
lock_guard<mutex> guard(m_mtx_config);
m_authentication_enabled = enabled;
}

/**
 * Return enabled authentication value
 *
 * @return True on success, False otherwise
 */
bool ServiceAuthHandler::getAuthenticatedCaller() { lock_guard<mutex> guard(m_mtx_config); return m_authentication_enabled; } /** * Verify URL path against URL array in security configuration * If array item value has ACL property a service name/type check is added * * @param path The requested service HTTP resource * @param serviceName The serviceName to check * @param serviceType The serviceType to check * @return True is the resource acces has been granted, false otherwise */ bool ServiceAuthHandler::verifyURL(const string& path, const string& serviceName, const string& serviceType) { // Check config with lock unique_lock<mutex> cfgLock(m_mtx_config); // Check m_security category item ACL is set string acl; if (this->m_security.itemExists("ACL")) { acl = this->m_security.getValue("ACL"); } cfgLock.unlock(); if (acl.empty()) { Logger::getLogger()->debug("verifyURL '%s', type '%s', " "the ACL is not set: allow any URL from any service type", serviceName.c_str(), serviceType.c_str()); return true; } const vector<ACL::UrlItem>& arrayURL = this->m_service_acl.getURL(); if (arrayURL.size() == 0) { Logger::getLogger()->debug("verifyURL '%s', type '%s', " "the URL array is empty: allow any URL from any service type", serviceName.c_str(), serviceType.c_str()); return true; } if (arrayURL.size() > 0) { bool typeMatched = false; bool URLMatched = false; // Check URL value for (auto it = arrayURL.begin(); it != arrayURL.end(); ++it) { string configURL = (*it).url; // Request path matches configured URLs if (configURL != "" && configURL == path) { URLMatched = true; } vector<ACL::KeyValueItem> aclServices = (*it).acl; if (URLMatched && aclServices.size() == 0) { Logger::getLogger()->debug("verifyURL '%s', type '%s', " "the URL '%s' has no ACL : allow any service type", serviceName.c_str(), serviceType.c_str()); return true; } for (auto iS = aclServices.begin(); iS != aclServices.end(); ++iS) { if ((*iS).key == "type" && (*iS).value == serviceType) { typeMatched = true; 
break; } } } Logger::getLogger()->debug("verify URL path '%s', type '%s': " "result URL %d, result type %d", path.c_str(), serviceType.c_str(), URLMatched, typeMatched); return URLMatched == true || typeMatched == true; } return false; } /** * Verify service caller name and type against ACL array in security configuration * * @param sName The caller service name * @param sType The caller service type (Northbound, Southbound, Notification, etc) * @return True is the resource acces has been granted, false otherwise */ bool ServiceAuthHandler::verifyService(const string& sName, const string &sType) { // Check config with lock unique_lock<mutex> cfgLock(m_mtx_config); // Check m_security category item ACL is set string acl; if (this->m_security.itemExists("ACL")) { acl = this->m_security.getValue("ACL"); } cfgLock.unlock(); if (acl.empty()) { Logger::getLogger()->debug("verifyService '%s', type '%s', " "the ACL is not set: allow any service", sName.c_str(), sType.c_str()); return true; } vector<ACL::KeyValueItem> aclServices = this->m_service_acl.getService(); if (aclServices.size() == 0) { Logger::getLogger()->debug("verifyService '%s', type '%s', " \ "has an empty ACL service array: allow any service", sName.c_str(), sType.c_str()); return true; } if (aclServices.size() > 0) { bool serviceMatched = false; bool typeMatched = false; for (auto it = aclServices.begin(); it != aclServices.end(); ++it) { if ((*it).key == "name" && (*it).value == sName) { serviceMatched = true; break; } if ((*it).key == "type" && (*it).value == sType) { typeMatched = true; break; } } Logger::getLogger()->debug("verify service '%s', type '%s': " "result service %d, result type %d", sName.c_str(), sType.c_str(), serviceMatched, typeMatched); return serviceMatched == true || typeMatched == true; } return false; } /** * Authentication Middleware for PUT methods * * Routine first check whether the service is configured with authentication * * Access bearer token is then verified against FogLAMP 
core API endpoint * JWT token claims are passed to verifyURL and verifyService routines * * If access is granted the input funcPUT funcion is called * otherwise error response is sent to the client * * @param response The HTTP Response to send * @param request The HTTP Request * @param funcPUT The function to call in case of access granted */ void ServiceAuthHandler::AuthenticationMiddlewarePUT(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request, std::function<void( shared_ptr<HttpServer::Response>, shared_ptr<HttpServer::Request>)> funcPUT) { string callerName; string callerType; for(auto &field : request->header) { if (field.first == "Service-Orig-From") { callerName = field.second; } if (field.first == "Service-Orig-Type") { callerType = field.second; } } // Get authentication enabled value bool acl_set = this->getAuthenticatedCaller(); Logger::getLogger()->debug("This service '%s' has AuthenticatedCaller flag set %d " "caller service is %s, type %s", this->getName().c_str(), acl_set, callerName.c_str(), callerType.c_str()); // Check authentication if (acl_set) { // Verify token via Fledge management core POST API call // we do not need token claims here bool ret = m_mgtClient->verifyAccessBearerToken(request); if (!ret) { string msg = "invalid service bearer token"; string responsePayload = "{ \"error\" : \"" + msg + "\" }"; Logger::getLogger()->error(msg.c_str()); this->respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload); return; } // Check whether caller name and type are passed if (callerName.empty() && callerType.empty()) { string msg = "authorisation not granted " \ "to this service: missing caller name and type"; string responsePayload = "{ \"error\" : \"" + msg + "\" }"; Logger::getLogger()->error(msg.c_str()); this->respond(response, SimpleWeb::StatusCode::client_error_unauthorized, responsePayload); return; } // Dispatcher service is always allowed to send control requests // to south 
service // // Checking for valid origin service caller (name/type) i.e // N1_HTTP/Northbound // NOTS/Notification bool valid_service = this->verifyService(callerName, callerType); if (!valid_service) { string msg = "authorisation not granted to this service"; string responsePayload = "{ \"error\" : \"" + msg + "\" }"; Logger::getLogger()->error(msg.c_str()); this->respond(response, SimpleWeb::StatusCode::client_error_unauthorized, responsePayload); return; } // Check URLS bool access_granted = this->verifyURL(request->path, callerName, callerType); if (!access_granted) { string msg = "authorisation not granted to this resource"; string responsePayload = "{ \"error\" : \"" + msg + "\" }"; Logger::getLogger()->error(msg.c_str()); this->respond(response, SimpleWeb::StatusCode::client_error_unauthorized, responsePayload); return; } } // Call PUT endpoint routine funcPUT(response, request); } /** * Authentication Middleware ACL check * * serviceName, serviceType and url (request->path) * are cheked with verifyURL and verifyService routines * * If access is granted return true * otherwise error response is sent to the client and return is false * * @param response The HTTP Response to send * @param request The HTTP Request * @param callerName The caller service name to check * @param callerType The caller service type to check * @return True on success * False otherwise with server reply error */ bool ServiceAuthHandler::AuthenticationMiddlewareACL(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request, const string& callerName, const string& callerType) { // Check for valid service caller (name, type) bool valid_service = this->verifyService(callerName, callerType); if (!valid_service) { string msg = "authorisation not granted to this service"; string responsePayload = "{ \"error\" : \"" + msg + "\" }"; Logger::getLogger()->error(msg.c_str()); // Error reply to client this->respond(response, SimpleWeb::StatusCode::client_error_unauthorized, 
responsePayload);

		return false;
	}

	// Check URLS
	bool access_granted = this->verifyURL(request->path, callerName, callerType);
	if (!access_granted)
	{
		string msg = "authorisation not granted to this resource";
		string responsePayload = "{ \"error\" : \"" + msg + "\" }";
		Logger::getLogger()->error(msg.c_str());

		// Error reply to client
		this->respond(response,
				SimpleWeb::StatusCode::client_error_unauthorized,
				responsePayload);

		return false;
	}

	return true;
}

/**
 * Authentication Middleware for Dispatcher service
 *
 * The access bearer token in the request is verified against the
 * core API endpoint; the token claims 'sub' and 'aud' along with the
 * request are then passed to the verifyURL and verifyService routines
 * via AuthenticationMiddlewareACL.
 *
 * If access is granted, callerName and callerType are filled from the
 * token claims and true is returned; otherwise an error response is
 * sent to the client and false is returned.
 *
 * @param response	The HTTP Response to send
 * @param request	The HTTP Request
 * @param callerName	Output: caller service name from token 'sub' claim
 * @param callerType	Output: caller service type from token 'aud' claim
 * @return		True on success
 *			False on errors
 */
bool ServiceAuthHandler::AuthenticationMiddlewareCommon(shared_ptr<HttpServer::Response> response,
			shared_ptr<HttpServer::Request> request,
			string& callerName,
			string& callerType)
{
	// Get token from HTTP request
	BearerToken bToken(request);

	// Verify token via Fledge management core POST API call and fill tokenClaims map
	bool ret = m_mgtClient->verifyAccessBearerToken(bToken);
	if (!ret)
	{
		string msg = "invalid service bearer token";
		string responsePayload = "{ \"error\" : \"" + msg + "\" }";
		Logger::getLogger()->error(msg.c_str());

		// Error reply to client
		this->respond(response,
				SimpleWeb::StatusCode::client_error_bad_request,
				responsePayload);

		// Failure
		return false;
	}

	// Check for valid service caller (name, type) and URLs
	bool check = this->AuthenticationMiddlewareACL(response,
						request,
						bToken.getSubject(),
						bToken.getAudience());

	// Check ACL result
	if (!check)
	{
		// Failure
		return false;
	}

	// Set caller name & type
	callerName = bToken.getSubject();
	callerType = bToken.getAudience();

	// Success
	return true;
}

/**
 * Refresh the bearer token of the running service.
 * This routine is run by a thread started in
 * createSecurityCategories.
 *
 * After sleeping for the time derived from the 'exp' claim of the
 * current token, a new one is requested to the core via the
 * token_refresh API endpoint.
 */
void ServiceAuthHandler::refreshBearerToken()
{
	Logger::getLogger()->debug("Bearer token refresh thread starts for service '%s'",
			this->getName().c_str());

	int max_retries = 10;
	time_t expires_in = 0;
	int k = 0;
	bool tokenVerified = false;
	string current_token;

	// While server is running get bearer token
	// and sleep for a few seconds.
	// When expires_in - DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION seconds is done
	// then get new token and sleep again
	while (m_refreshRunning)
	{
		if (k >= max_retries)
		{
			// NOTE(review): message is missing the closing quote after the service name
			string msg = "Bearer token not found for service '" +
					this->getName() +
					" refresh thread exits after " +
					std::to_string(max_retries) + " retries";
			Logger::getLogger()->error(msg.c_str());

			// Shutdown service
			if (m_refreshRunning)
			{
				Logger::getLogger()->warn("Service is being restarted " \
						"due to bearer token refresh error");
				this->restart();
				break;
			}
		}

		if (!tokenVerified)
		{
			// Fetch current bearer token
			BearerToken bToken(m_mgtClient->getRegistrationBearerToken());
			if (bToken.exists())
			{
				// Ask verification to core service and get token claims
				tokenVerified = m_mgtClient->verifyBearerToken(bToken);
			}

			// Give it a try in case of any error from core service
			if (!tokenVerified)
			{
				k++;
				Logger::getLogger()->error("Refreshing bearer token thread for service '%s' "
						"got empty or invalid bearer token '%s', retry n. %d",
						this->getName().c_str(),
						bToken.token().c_str(),
						k);
				// Sleep for 1 second
				std::this_thread::sleep_for(std::chrono::seconds(1));
				continue;
			}

			// Save verified token
			current_token = bToken.token();

			// Token exists and it is valid, get expiration time
			expires_in = bToken.getExpiration() - time(NULL) - DELTA_SECONDS_BEFORE_TOKEN_EXPIRATION;
			Logger::getLogger()->debug("Bearer token refresh will be called in "
					"%ld seconds, service '%s'",
					expires_in,
					this->getName().c_str());
		}

		// Check the expiration time is done
		if (expires_in > 0)
		{
			// Thread sleeps for a few seconds, so it can get shutdown indicator
			std::this_thread::sleep_for(std::chrono::seconds(10));
			expires_in -= 10;
			continue;
		}

		// A shutdown maybe is set, since last check: check it now
		// refresh_token core API endpoint
		if (!m_refreshRunning)
		{
			Logger::getLogger()->info("Service is being shut down: " \
					"refresh thread does not call " \
					"refresh endpoint and exits now");
			break;
		}

		Logger::getLogger()->debug("Bearer token refresh thread calls "
				"token refresh endpoint for service '%s'",
				this->getName().c_str());

		// Get a new bearer token for this service via
		// refresh_token core API endpoint
		string newToken;
		bool ret = m_mgtClient->refreshBearerToken(current_token, newToken);
		if (ret)
		{
			Logger::getLogger()->debug("Bearer token refresh thread has got "
					"a new bearer token for service '%s, %s",
					this->getName().c_str(),
					newToken.c_str());

			// Store new bearer token
			m_mgtClient->setNewBearerToken(newToken);

			// Next loop will verify token
			tokenVerified = false;
		}
		else
		{
			k++;
			string msg = "Failed to get a new token "
					"via refresh API call for service '" +
					this->getName() + "'";
			Logger::getLogger()->fatal("%s, current token is '%s', retry n. %d",
					msg.c_str(),
					current_token.c_str(),
					k);
			// Sleep for some time
			std::this_thread::sleep_for(std::chrono::seconds(1));
			continue;
		}
	}

	Logger::getLogger()->info("Refreshing bearer token thread for service '%s' stopped",
			this->getName().c_str());
}

/**
 * Thread entry point to refresh the bearer token
 *
 * @param data	Pointer to ServiceAuthHandler object
 */
static void bearer_token_refresh_thread(void *data)
{
	ServiceAuthHandler *service = (ServiceAuthHandler *)data;
	service->refreshBearerToken();
}

/**
 * Request security change action:
 *
 * Given a reason code, "attachACL", "detachACL", "reloadACL", "updateACL"
 * in the 'reason' attribute, the ACL name in 'argument' could be
 * attached, detached or reloaded
 *
 * @param payload	The JSON document with 'reason' and 'argument'
 * @return		True on success
 */
bool ServiceAuthHandler::securityChange(const string& payload)
{
	// Parse JSON data
	ACL::ACLReason reason(payload);
	Logger::getLogger()->debug("Reason is %s, argument %s",
			reason.getReason().c_str(),
			reason.getArgument().c_str());
	string r = reason.getReason();

	// Lock config
	lock_guard<mutex> cfgLock(m_mtx_config);

	if (r == "attachACL")
	{
		// Fetch and load ACL
		m_service_acl = m_mgtClient->getACL(reason.getArgument());
	}
	else if (r == "reloadACL" || r == "updateACL")
	{
		// Fetch and load new or updated ACL
		m_service_acl = m_mgtClient->getACL(reason.getArgument());
	}
	else if (r == "detachACL")
	{
		m_service_acl = ACL();
	}
	else
	{
		// Error
		Logger::getLogger()->error("Reason '%s' is not supported",
				reason.getReason().c_str());
		return false;
	}

	return true;
}

================================================
FILE: C/services/common/south_python_plugin_handle.cpp
================================================
/*
 * Fledge plugin handle related
 *
 * Copyright (c) 2019 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Amandeep Singh Arora, Massimiliano Pinto
 */
#include <config_category.h>
#include <reading.h>
#include <logger.h>
#include <south_python_plugin_handle.h>

#define 
PYTHON_PLUGIN_INTF_LIB "libsouth-plugin-python-interface.so" #define PRINT_FUNC Logger::getLogger()->info("%s:%d", __FUNCTION__, __LINE__); typedef PLUGIN_INFORMATION *(*pluginInitFn)(const char *pluginName, const char *path); using namespace std; /** * Constructor for PythonPluginHandle * - Load python interface library and initialize the interface * * @param pluginName The Python plugin name to load * @param pluginPathName The Python plugin path */ SouthPythonPluginHandle::SouthPythonPluginHandle(const char *pluginName, const char *pluginPathName) : PythonPluginHandle(pluginName, pluginPathName) { // expecting this lib to be present in LD_LIBRARY_PATH: //same dir as where lib-services-common.so is present string libPath = PYTHON_PLUGIN_INTF_LIB; m_hndl = dlopen(libPath.c_str(), RTLD_NOW | RTLD_GLOBAL); if (!m_hndl) { Logger::getLogger()->error("PythonPluginHandle c'tor: dlopen failed for library '%s' : %s", libPath.c_str(), dlerror()); return; } pluginInitFn initFn = (pluginInitFn) dlsym(m_hndl, "PluginInterfaceInit"); if (initFn == NULL) { // Unable to find PluginInterfaceInit entry point Logger::getLogger()->error("Plugin library %s does not support %s function : %s", libPath.c_str(), "PluginInterfaceInit", dlerror()); dlclose(m_hndl); m_hndl = NULL; return; } // Initialise embedded Python and the interface void *ref = initFn(pluginName, pluginPathName); if (ref == NULL) { fprintf(stderr, "Plugin library %s : PluginInterfaceInit returned failure", libPath.c_str()); dlclose(m_hndl); m_hndl = NULL; return; } // Set type m_type = PLUGIN_TYPE_SOUTH; } ================================================ FILE: C/services/common-plugin-interfaces/python/include/python_plugin_common_interface.h ================================================ #ifndef _PYTHON_PLUGIN_BASE_INTERFACE_H #define _PYTHON_PLUGIN_BASE_INTERFACE_H /* * Fledge common plugin interface * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto, 
Amandeep Singh Arora */ #include <cctype> #include <plugin_manager.h> #define SHIM_SCRIPT_REL_PATH "/python/fledge/plugins/common/shim/" #define SHIM_SCRIPT_POSTFIX "_shim" using namespace std; /** * This class represents the loaded Python module * with interpreter initialisation flag. * That flag is checked in PluginInterfaceCleanup * before removing Python interpreter. */ class PythonModule { public: PythonModule(PyObject* module, bool init, string name, string type, PyThreadState* state) : m_module(module), m_init(init), m_name(name), m_type(type), m_tState(state) { }; ~PythonModule() { // Destroy loaded Python module Py_CLEAR(m_module); m_module = NULL; }; void setCategoryName(string category) { m_categoryName = category; }; string& getCategoryName() { return m_categoryName; }; public: PyObject* m_module; bool m_init; string m_name; string m_type; PyThreadState* m_tState; string m_categoryName; }; extern "C" { // This is the map of Python object initialised in each // South, Notification, Filter plugin interfaces static map<string, PythonModule*> *pythonModules = new map<string, PythonModule*>(); // Map of PLUGIN_HANDLE objects, updated by plugin_init calls static map<PLUGIN_HANDLE, PythonModule*> *pythonHandles = new map<PLUGIN_HANDLE, PythonModule*>(); // Global variable gPluginName set by PluginInterfaceInit: // it has a different memory address when set/read by // PluginInterfaceInit in South, Filter or Notification // Only used in plugin_info_fn calls static string gPluginName; // Common methods to all plugin interfaces static PLUGIN_INFORMATION *plugin_info_fn(); static PLUGIN_HANDLE plugin_init_fn(ConfigCategory *); static void plugin_reconfigure_fn(PLUGIN_HANDLE*, const std::string&); static void plugin_shutdown_fn(PLUGIN_HANDLE); static void logErrorMessage(); static bool numpyImportError = false; /** * Destructor for PythonPluginHandle * - Free up owned references * - Unload python 3.5 interpreter * * @param plugnName The Python plugin to cleanup */ 
void PluginInterfaceCleanup(const string& pluginName) { bool removePython = false; if (!pythonModules) { Logger::getLogger()->error("pythonModules map is NULL " "in PluginInterfaceCleanup, plugin '%s'", pluginName.c_str()); return; } // Acquire GIL PyGILState_STATE state = PyGILState_Ensure(); // Look for Python module, pluginName is the key auto it = pythonModules->find(pluginName); if (it != pythonModules->end()) { // Remove Python 3.x environment? removePython = it->second->m_init; // Remove this element pythonModules->erase(it); } // Look for Python module handle for (auto h = pythonHandles->begin(); h != pythonHandles->end(); ) { // Compare pluginName with m_name if (h->second->m_name.compare(pluginName) == 0) { // Remove PythonModule object if (h->second->m_module) { Py_CLEAR(h->second->m_module); h->second->m_module = NULL; } // Remove PythonModule delete h->second; h->second = NULL; // Remove this element h = pythonHandles->erase(h); } else { ++h; } } // Remove PythonModule object if (it->second && it->second->m_module) { Py_CLEAR(it->second->m_module); it->second->m_module = NULL; } // Remove all maps if empty if (pythonModules->size() == 0) { // Remove map object delete pythonModules; } if (pythonHandles->size() == 0) { // Remove map object delete pythonHandles; } if (removePython) { Logger::getLogger()->debug("Removing Python interpreter " "started by plugin '%s'", pluginName.c_str()); // Cleanup Python 3.5 Py_Finalize(); } else { PyGILState_Release(state); } Logger::getLogger()->debug("PluginInterfaceCleanup succesfully " "called for plugin '%s'", pluginName.c_str()); } /** * Returns function pointer that can be invoked to call 'plugin_info' * function in python plugin */ static void* PluginInterfaceGetInfo() { return (void *) plugin_info_fn; } /** * Function to set current loglevel in given python plugin/filter module * * @param python_module The python plugin/filter module to which to propagate the loglevel * @param s Debug string indicating the 
module name and plugin API that caused this loglevel change */ void set_loglevel_in_python_module(PyObject *python_module, string s) { string& _loglevel = Logger::getLogger()->getMinLevel(); for (auto & c: _loglevel) c = toupper(c); const char *loglevel = _loglevel.c_str(); PyObject* mod = python_module; if (mod != NULL) { PyObject* loggerObj = PyObject_GetAttrString(mod, "_LOGGER"); if (loggerObj != NULL) { PyObject* method = PyObject_GetAttrString(loggerObj, "setLevel"); if (method != NULL) { PyObject *args = PyTuple_New(1); PyObject *pValue = Py_BuildValue("s", loglevel); PyTuple_SetItem(args, 0, pValue); PyObject* retVal = PyObject_Call(method, args, NULL); Py_CLEAR(args); Py_CLEAR(method); Py_CLEAR(loggerObj); if (retVal != NULL) { Logger::getLogger()->debug("%s: %s: _LOGGER.setLevel(%s) done successfully", __FUNCTION__, s.c_str(), loglevel); } else { Logger::getLogger()->warn("%s: _LOGGER.setLevel(%s) failed", __FUNCTION__, loglevel); if (PyErr_Occurred()) { logErrorMessage(); return; } } } else { Logger::getLogger()->warn("%s: Method 'setLevel' not found", __FUNCTION__); Py_CLEAR(loggerObj); } } else { Logger::getLogger()->warn("%s: Object '_LOGGER' not found in python module", __FUNCTION__); } } else Logger::getLogger()->warn("%s: module is NULL", __FUNCTION__); PyErr_Clear(); } /** * Invokes json.dumps inside python interpreter */ const char *json_dumps(PyObject *json_dict) { PyObject *rval; PyObject *mod, *method; PyGILState_STATE state = PyGILState_Ensure(); if ((mod = PyImport_ImportModule("json")) != NULL) { if ((method = PyObject_GetAttrString(mod, "dumps")) != NULL) { PyObject *args = PyTuple_New(1); PyObject *pValue = Py_BuildValue("O", json_dict); PyTuple_SetItem(args, 0, pValue); rval = PyObject_Call(method, args, NULL); Py_CLEAR(args); Py_CLEAR(method); Py_CLEAR(mod); if (rval == NULL) { if (PyErr_Occurred()) { logErrorMessage(); return NULL; } } else Logger::getLogger()->info("%s:%d, rval type=%s", __FUNCTION__, __LINE__, 
(Py_TYPE(rval))->tp_name); } else { Logger::getLogger()->fatal("Method 'dumps' not found"); Py_CLEAR(mod); } // Remove references } else { Logger::getLogger()->fatal("Failed to import module"); } // Reset error PyErr_Clear(); PyGILState_Release(state); const char *retVal = PyUnicode_AsUTF8(rval); Logger::getLogger()->debug("%s: retVal=%s", __FUNCTION__, retVal); return retVal; } /** * Invokes json.loads inside python interpreter */ PyObject *json_loads(const char *json_str) { PyObject *rval; PyObject *mod, *method; PyGILState_STATE state = PyGILState_Ensure(); if ((mod = PyImport_ImportModule("json")) != NULL) { if ((method = PyObject_GetAttrString(mod, "loads")) != NULL) { PyObject *args = PyTuple_New(1); PyObject *pValue = Py_BuildValue("s", json_str); PyTuple_SetItem(args, 0, pValue); Logger::getLogger()->debug("%s:%d: method=%p, args=%p, pValue=%p", __FUNCTION__, __LINE__, method, args, pValue); rval = PyObject_Call(method, args, NULL); Py_CLEAR(args); Py_CLEAR(method); Py_CLEAR(mod); if (rval == NULL) { if (PyErr_Occurred()) { logErrorMessage(); return NULL; } } else Logger::getLogger()->debug("%s:%d, rval type=%s", __FUNCTION__, __LINE__, (Py_TYPE(rval))->tp_name); } else { Logger::getLogger()->fatal("Method 'loads' not found"); Py_CLEAR(mod); } } else { Logger::getLogger()->fatal("Failed to import module"); } // Reset error PyErr_Clear(); PyGILState_Release(state); return rval; } /** * Fill PLUGIN_INFORMATION structure from Python object * * @param pyRetVal Python 3.5 Object (dict) * @return Pointer to a new PLUGIN_INFORMATION structure * or NULL in case of errors */ static PLUGIN_INFORMATION *Py2C_PluginInfo(PyObject* pyRetVal) { // Create returnable PLUGIN_INFORMATION structure PLUGIN_INFORMATION *info = new PLUGIN_INFORMATION; info->options = 0; // these are borrowed references returned by PyDict_Next PyObject *dKey, *dValue; Py_ssize_t dPos = 0; PyObject* objectsRepresentation = PyObject_Repr(pyRetVal); const char* s = 
PyUnicode_AsUTF8(objectsRepresentation); Logger::getLogger()->debug("Py2C_PluginInfo(): plugin_info returned: %s", s); Py_CLEAR(objectsRepresentation); // dKey and dValue are borrowed references while (PyDict_Next(pyRetVal, &dPos, &dKey, &dValue)) { const char* ckey = PyUnicode_AsUTF8(dKey); const char* cval = PyUnicode_AsUTF8(dValue); Logger::getLogger()->debug("%s:%d, key=%s, value=%s, dValue type=%s", __FUNCTION__, __LINE__, ckey, cval, (Py_TYPE(dValue))->tp_name); char *valStr = NULL; if (!PyDict_Check(dValue)) { valStr = new char [string(cval).length()+1]; std::strcpy (valStr, cval); Logger::getLogger()->debug("%s:%d, key=%s, value=%s, valStr=%s", __FUNCTION__, __LINE__, ckey, cval, valStr); } if(!strcmp(ckey, "name")) { info->name = valStr; } else if(!strcmp(ckey, "version")) { info->version = valStr; } else if(!strcmp(ckey, "mode")) { // Need to also handle mode values of the form "poll|control" stringstream ss(valStr); string s; info->options = 0; // Tokenizing w.r.t. pipe '|' while(getline(ss, s, '|')) { Logger::getLogger()->debug("%s: mode: Found token %s", __FUNCTION__, s.c_str()); if (s.compare("async")==0) { info->options |= SP_ASYNC; } else if (s.compare("control")==0) { info->options |= SP_CONTROL; } else if (s.compare("poll")==0) { // Nothing to set } else if (s.compare("none")==0) { // Ignore } else Logger::getLogger()->warn("%s: mode: Unknown token/value %s", __FUNCTION__, s.c_str()); } delete[] valStr; } else if(!strcmp(ckey, "type")) { info->type = valStr; } else if(!strcmp(ckey, "interface")) { info->interface = valStr; } else if(!strcmp(ckey, "config")) { // if 'config' value is of dict type, convert it to string if (strcmp((Py_TYPE(dValue))->tp_name, "dict")==0) { PyObject* objectsRepresentation = PyObject_Repr(dValue); const char* s = PyUnicode_AsUTF8(objectsRepresentation); Logger::getLogger()->debug("Py2C_PluginInfo(): INPUT: config value=%s", s); Py_CLEAR(objectsRepresentation); info->config = json_dumps(dValue); 
Logger::getLogger()->info("Py2C_PluginInfo(): OUTPUT: config value=%s", info->config); } else info->config = valStr; } else Logger::getLogger()->info("%s:%d: Unexpected key %s", __FUNCTION__, __LINE__, ckey); } return info; } /** * Function to invoke 'plugin_info' function in python plugin */ static PLUGIN_INFORMATION *plugin_info_fn() { if (!pythonModules) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_info_fn, plugin '%s'", gPluginName.c_str()); return NULL; } // Look for Python module for gPluginName key auto it = pythonModules->find(gPluginName); if (it == pythonModules->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_info(): " "pModule is NULL for plugin '%s'", gPluginName.c_str()); return NULL; } PyObject* pFunc; PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_info"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find method 'plugin_info' " "in loaded python module '%s', m_module=%p", gPluginName.c_str(), it->second->m_module); } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method 'plugin_info' " "in loaded python module '%s'", gPluginName.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return NULL; } // Call Python method passing an object PyObject* pReturn = PyObject_CallFunction(pFunc, NULL); Py_CLEAR(pFunc); PLUGIN_INFORMATION *info = NULL; // Handle returned data if (!pReturn) { Logger::getLogger()->error("Called python script method 'plugin_info' " ": error while getting result object, plugin '%s'", gPluginName.c_str()); logErrorMessage(); info = NULL; } else { // Parse plugin information info = Py2C_PluginInfo(pReturn); // Remove pReturn object Py_CLEAR(pReturn); } if (info) { // bump interface version to atleast 2.x so that we are able to handle // list of readings from python 
plugins in plugin_poll if (info->interface[0] =='1' && info->interface[1] == '.') { Logger::getLogger()->info("plugin_handle: plugin_info(): " "Updating interface version " "from '%s' to '2.0.0', plugin '%s'", info->interface, gPluginName.c_str()); delete[] info->interface; char *valStr = new char[6]; std::strcpy(valStr, "2.0.0"); info->interface = valStr; } Logger::getLogger()->info("plugin_handle: plugin_info(): info={name=%s, " "version=%s, options=%d, type=%s, interface=%s, config=%s}", info->name, info->version, info->options, info->type, info->interface, info->config); } PyGILState_Release(state); return info; } /** * Function to invoke 'plugin_init' function in python plugin * * @param config ConfigCategory configuration object * @retun PLUGIN_HANDLE object */ static PLUGIN_HANDLE plugin_init_fn(ConfigCategory *config) { // Get plugin name string pName = config->getValue("plugin"); if (!pythonModules) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_init_fn, plugin '%s'", pName.c_str()); return NULL; } Logger::getLogger()->debug("plugin_handle: plugin_init(): " "config->itemsToJSON()='%s'", config->itemsToJSON().c_str()); bool loadModule = false; bool reloadModule = false; bool pythonInitState = false; string loadPluginType; PythonModule* module = NULL; PyThreadState* newInterp = NULL; // Check wether plugin pName has been already loaded for (auto h = pythonHandles->begin(); h != pythonHandles->end(); ++h) { if (h->second->m_name.compare(pName) == 0) { Logger::getLogger()->debug("%s_plugin_init_fn: already loaded " "a plugin with name '%s'. 
Loading a new ", h->second->m_type.c_str(), pName.c_str()); // Set Python library loaded state pythonInitState = h->second->m_init; // Set plugin type loadPluginType = h->second->m_type; // Set load indicator loadModule = true; } } if (!loadModule) { // Plugin name not previously loaded: check current Python module // pName is the key auto it = pythonModules->find(pName); if (it == pythonModules->end()) { Logger::getLogger()->debug("plugin_handle: plugin_init(): " "pModule not found for plugin '%s': ", pName.c_str()); // Set plugin type PluginManager* pMgr = PluginManager::getInstance(); PLUGIN_HANDLE tmp = pMgr->findPluginByName(pName); if (tmp) { PLUGIN_INFORMATION* pInfo = pMgr->getInfo(tmp); if (pInfo) { loadPluginType = string(pInfo->type); } } // Set reload indicator reloadModule = true; } else { if (it->second && it->second->m_module) { // Just use current loaded module: no load or re-load action module = it->second; // Set Python library loaded state pythonInitState = it->second->m_init; } else { Logger::getLogger()->fatal("plugin_handle: plugin_init(): " "found pModule is NULL for plugin '%s': ", pName.c_str()); return NULL; } } } Logger::getLogger()->info("%s:%d: loadModule=%s, reloadModule=%s", __FUNCTION__, __LINE__, loadModule?"TRUE":"FALSE", reloadModule?"TRUE":"FALSE"); // Acquire GIL PyGILState_STATE state = PyGILState_Ensure(); // Import Python module using a new interpreter if (loadModule || reloadModule) { string fledgePythonDir; string fledgeRootDir(getenv("FLEDGE_ROOT")); fledgePythonDir = fledgeRootDir + "/python"; int argc = 2; // Set Python path for embedded Python 3.x // Get current sys.path - borrowed reference PyObject* sysPath = PySys_GetObject((char *)"path"); PyList_Append(sysPath, PyUnicode_FromString((char *) fledgePythonDir.c_str())); // Set sys.argv for embedded Python 3.x wchar_t* argv[argc]; argv[0] = Py_DecodeLocale("", NULL); argv[1] = Py_DecodeLocale(pName.c_str(), NULL); if (argc > 2) { argv[2] = 
Py_DecodeLocale(loadPluginType.c_str(), NULL); } // Set script parameters PySys_SetArgv(argc, argv); Logger::getLogger()->debug("%s_plugin_init_fn, %sloading plugin '%s', ", loadPluginType.c_str(), reloadModule ? "re-" : "", pName.c_str()); // Import Python script PyObject *newObj = PyImport_ImportModule(pName.c_str()); // Check result if (newObj) { // Create a new PythonModule PythonModule* newModule; if ((newModule = new PythonModule(newObj, pythonInitState, pName, loadPluginType, NULL)) == NULL) { // Release lock PyGILState_Release(state); Logger::getLogger()->fatal("plugin_handle: plugin_init(): " "failed to create Python module " "object, plugin '%s'", pName.c_str()); return NULL; } // Set module module = newModule; } else { logErrorMessage(); // Release lock PyGILState_Release(state); Logger::getLogger()->fatal("plugin_handle: plugin_init(): " "failed to import plugin '%s'", pName.c_str()); return NULL; } } Logger::getLogger()->debug("%s_plugin_init_fn for '%s', pModule '%p', ", loadPluginType.c_str(), module->m_name.c_str(), module->m_module); Logger::getLogger()->debug("%s:%d: calling set_loglevel_in_python_module(), loglevel=%s", __FUNCTION__, __LINE__, Logger::getLogger()->getMinLevel().c_str()); set_loglevel_in_python_module(module->m_module, module->m_name + " plugin_init"); PyObject *config_dict = json_loads(config->itemsToJSON().c_str()); // Call Python method passing an object PyObject* pReturn = PyObject_CallMethod(module->m_module, "plugin_init", "O", config_dict); Py_CLEAR(config_dict); // Handle returned data if (!pReturn) { Logger::getLogger()->error("Called python script method plugin_init : " "error while getting result object, plugin '%s'", pName.c_str()); logErrorMessage(); } else { Logger::getLogger()->debug("plugin_handle: plugin_init(): " "got handle from python plugin='%p', *handle %p, plugin '%s'", pReturn, &pReturn, pName.c_str()); } // Add the handle to handles map as key, PythonModule object as value std::pair<std::map<PLUGIN_HANDLE, 
PythonModule*>::iterator, bool> ret; if (pythonHandles) { // Add to handles map the PythonModule object ret = pythonHandles->insert(pair<PLUGIN_HANDLE, PythonModule*> ((PLUGIN_HANDLE)pReturn, module)); if (ret.second) { Logger::getLogger()->debug("plugin_handle: plugin_init(): " "handle %p of python plugin '%s' " "added to pythonHandles map", pReturn, pName.c_str()); } else { Logger::getLogger()->error("plugin_handle: plugin_init(): " "failed to insert handle %p of " "python plugin '%s' to pythonHandles map", pReturn, pName.c_str()); Py_CLEAR(module->m_module); module->m_module = NULL; delete module; module = NULL; Py_CLEAR(pReturn); pReturn = NULL; } } // Release locks if (newInterp) { PyEval_ReleaseThread(newInterp); } else { PyGILState_Release(state); } return pReturn ? (PLUGIN_HANDLE) pReturn : NULL; } /** * Function to invoke 'plugin_reconfigure' function in python plugin * * @param handle The plugin handle from plugin_init_fn * @param config The new configuration, as string */ static void plugin_reconfigure_fn(PLUGIN_HANDLE* handle, const std::string& config) { Logger::getLogger()->debug("%s:%d: config=%s", __FUNCTION__, __LINE__, config.c_str()); if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_reconfigure(): " "handle is NULL"); return; } if (!pythonHandles) { Logger::getLogger()->error("pythonHandles map is NULL " "in plugin_reconfigure, plugin handle '%p'", handle); return; } // Look for Python module for handle key auto it = pythonHandles->find(*handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_reconfigure(): " "pModule is NULL, plugin handle '%p'", handle); return; } std::mutex mtx; PyObject* pFunc; lock_guard<mutex> guard(mtx); PyGILState_STATE state = PyGILState_Ensure(); Logger::getLogger()->debug("plugin_handle: plugin_reconfigure(): " "pModule=%p, *handle=%p, plugin '%s'", it->second->m_module, *handle, it->second->m_name.c_str()); 
if(config.compare("logLevel") == 0) { Logger::getLogger()->debug("calling set_loglevel_in_python_module() for updating loglevel"); set_loglevel_in_python_module(it->second->m_module, it->second->m_name+" plugin_reconf"); PyGILState_Release(state); return; } // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_reconfigure"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find method 'plugin_reconfigure' " "in loaded python module '%s'", it->second->m_name.c_str()); } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method plugin_reconfigure " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return; } Logger::getLogger()->debug("plugin_reconfigure with %s", config.c_str()); PyObject *new_config_dict = json_loads(config.c_str()); // Call Python method passing an object and a C string PyObject* pReturn = PyObject_CallFunction(pFunc, "OO", *handle, new_config_dict); Py_CLEAR(pFunc); Py_CLEAR(new_config_dict); // Handle returned data if (!pReturn) { Logger::getLogger()->error("Called python script method plugin_reconfigure " ": error while getting result object, plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); //*handle = NULL; // not sure if this should be treated as unrecoverable failure on python plugin side } else { // Save PythonModule PythonModule* currentModule = it->second; Py_CLEAR(*handle); *handle = pReturn; if (pythonHandles) { // Remove current handle from the pythonHandles map pythonHandles->erase(it); // Add the handle to handles map as key, PythonModule object as value std::pair<std::map<PLUGIN_HANDLE, PythonModule*>::iterator, bool> ret; ret = pythonHandles->insert(pair<PLUGIN_HANDLE, PythonModule*> ((PLUGIN_HANDLE)*handle, currentModule)); Logger::getLogger()->debug("plugin_handle: plugin_reconfigure(): " "updated handle %p of python plugin '%s'" 
" in pythonHandles map", *handle, currentModule->m_name.c_str()); } else { Logger::getLogger()->error("plugin_handle: plugin_reconfigure(): " "failed to update handle %p of python plugin '%s'" " in pythonHandles map", *handle, currentModule->m_name.c_str()); } } PyGILState_Release(state); } /** * Function to log error message encountered while interfacing with * Python runtime */ static void logErrorMessage() { PyObject* type; PyObject* value; PyObject* traceback; numpyImportError = false; PyErr_Fetch(&type, &value, &traceback); PyErr_NormalizeException(&type, &value, &traceback); PyObject* str_exc_value = PyObject_Repr(value); PyObject* pyExcValueStr = PyUnicode_AsEncodedString(str_exc_value, "utf-8", "Error ~"); const char* pErrorMessage = value ? PyBytes_AsString(pyExcValueStr) : "no error description."; Logger::getLogger()->warn("logErrorMessage: Error '%s', plugin '%s'", pErrorMessage, gPluginName.c_str()); // Check for numpy/pandas import errors const char *err1 = "implement_array_function method already has a docstring"; const char *err2 = "cannot import name 'check_array_indexer' from 'pandas.core.indexers'"; numpyImportError = strstr(pErrorMessage, err1) || strstr(pErrorMessage, err2); std::string fcn = ""; fcn += "def get_pretty_traceback(exc_type, exc_value, exc_tb):\n"; fcn += " import sys, traceback\n"; fcn += " lines = []\n"; fcn += " lines = traceback.format_exception(exc_type, exc_value, exc_tb)\n"; fcn += " output = '\\n'.join(lines)\n"; fcn += " return output\n"; PyRun_SimpleString(fcn.c_str()); PyObject* mod = PyImport_ImportModule("__main__"); if (mod != NULL) { PyObject* method = PyObject_GetAttrString(mod, "get_pretty_traceback"); if (method != NULL) { PyObject* outStr = PyObject_CallObject(method, Py_BuildValue("OOO", type, value, traceback)); if (outStr != NULL) { PyObject* tmp = PyUnicode_AsASCIIString(outStr); if (tmp != NULL) { std::string pretty = PyBytes_AsString(tmp); Logger::getLogger()->warn("%s", pretty.c_str()); 
Logger::getLogger()->printLongString(pretty.c_str()); } Py_CLEAR(tmp); } Py_CLEAR(outStr); } Py_CLEAR(method); } // Reset error PyErr_Clear(); // Remove references Py_CLEAR(type); Py_CLEAR(value); Py_CLEAR(traceback); Py_CLEAR(str_exc_value); Py_CLEAR(pyExcValueStr); Py_CLEAR(mod); } /** * Function to invoke 'plugin_shutdown' function in python plugin * * @param handle The plugin handle from plugin_init_fn */ static void plugin_shutdown_fn(PLUGIN_HANDLE handle) { if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_shutdown_fn: " "handle is NULL"); return; } if (!pythonHandles) { Logger::getLogger()->error("pythonHandles map is NULL " "in plugin_shutdown_fn, plugin handle '%p'", handle); return; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_shutdown_fn: " "pModule is NULL, plugin handle '%p'", handle); return; } if (! 
Py_IsInitialized()) { Logger::getLogger()->debug("%s - Python environment not initialized, exiting from the function ", __FUNCTION__); return; } PyObject* pFunc; PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_shutdown"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find method 'plugin_shutdown' " "in loaded python module '%s'", it->second->m_name.c_str()); } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method 'plugin_shutdown' " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return; } // Call Python method passing an object PyObject* pReturn = PyObject_CallFunction(pFunc, "O", handle); Py_CLEAR(pFunc); if (false) // no seperate python interpreter is used anymore for python plugins { // Switch to Interpreter thread PyThreadState* swapState = PyThreadState_Swap(it->second->m_tState); // Remove Python module Py_CLEAR(it->second->m_module); it->second->m_module = NULL; // Stop Interpreter thread Py_EndInterpreter(it->second->m_tState); Logger::getLogger()->debug("plugin_shutdown_fn: Py_EndInterpreter of '%p' " "for plugin '%s'", it->second->m_tState, it->second->m_name.c_str()); // Return to main thread PyThreadState_Swap(swapState); // Set pointer to null it->second->m_tState = NULL; } else { // Remove Python module Py_CLEAR(it->second->m_module); it->second->m_module = NULL; } PythonModule* module = it->second; string pName = it->second->m_name; // Remove item pythonHandles->erase(it); // Look for Python module, pName is the key auto m = pythonModules->find(pName); if (m != pythonModules->end()) { // Remove this element pythonModules->erase(m); } // Release module object delete module; module = NULL; // Release GIL PyGILState_Release(state); Logger::getLogger()->debug("plugin_shutdown_fn succesfully " "called for 
plugin '%s'", pName.c_str()); } }; #endif ================================================ FILE: C/services/core/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (Core) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(DLLIB -ldl) set(UUIDLIB -luuid) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) include_directories(. include ../../thirdparty/Simple-Web-Server ../../thirdparty/rapidjson/include ../common/include ../../common/include) find_package(Threads REQUIRED) set(BOOST_COMPONENTS system thread) # Late 2017 TODO: remove the following checks and always use std::regex if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_BOOST_REGEX") endif() endif() find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) if(APPLE) set(OPENSSL_ROOT_DIR "/usr/local/opt/openssl") endif() file(GLOB core_src "*.cpp") link_directories(${PROJECT_BINARY_DIR}/../../lib) # Create static library add_library(core ${core_src}) target_link_libraries(core ${Boost_LIBRARIES}) target_link_libraries(core ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(core ${DLLIB}) target_link_libraries(core ${UUIDLIB}) target_link_libraries(core -lssl -lcrypto) target_link_libraries(core ${COMMON_LIB}) target_link_libraries(core ${SERVICE_COMMON_LIB}) if(MSYS) #TODO: Is MSYS true when MSVC is true? target_link_libraries(storage ws2_32 wsock32) if(OPENSSL_FOUND) target_link_libraries(storage ws2_32 wsock32) endif() endif() ================================================ FILE: C/services/core/configuration_manager.cpp ================================================ /* * Fledge Fledge Configuration management. 
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */

#include <sstream>
#include <configuration_manager.h>
#include <rapidjson/writer.h>

using namespace std;
using namespace rapidjson;

// Singleton instance, created lazily by getInstance()
ConfigurationManager *ConfigurationManager::m_instance = 0;

/**
 * Constructor
 *
 * @param host	Storage layer TCP address
 * @param port	Storage layer TCP port
 */
ConfigurationManager::ConfigurationManager(const string& host,
					   unsigned short port)
{
	m_storage = new StorageClient(host, port);
}

// Destructor
ConfigurationManager::~ConfigurationManager()
{
	delete m_storage;
}

/**
 * Return the singleton instance of the configuration manager
 *
 * NOTE(review): lazy initialisation is not thread safe (no lock around the
 * NULL check) and host/port are ignored after the first call — confirm all
 * callers run on a single thread during start-up.
 *
 * @param host	Storage layer TCP address
 * @param port	Storage layer TCP port
 * @return	The configuration manager class instance
 */
ConfigurationManager* ConfigurationManager::getInstance(const string& host,
							unsigned short port)
{
	if (m_instance == 0)
	{
		m_instance = new ConfigurationManager(host, port);
	}
	return m_instance;
}

/**
 * Return all Fledge categories from storage layer
 *
 * @return	ConfigCategories class object with
 *		key and description for all found categories.
 * @throw	CategoryDetailsEx exception
 */
ConfigCategories ConfigurationManager::getAllCategoryNames() const
{
	// Return object
	ConfigCategories categories;

	// SELECT key, description FROM configuration
	vector<Returns *> columns;
	columns.push_back(new Returns("key"));
	columns.push_back(new Returns("description"));
	Query qAllCategories(columns);

	ResultSet* allCategories = 0;

	try
	{
		// Query via Storage client
		allCategories = m_storage->queryTable("configuration", qAllCategories);
		if (!allCategories || !allCategories->rowCount())
		{
			// Data layer error or no data to handle
			// (an empty configuration table is treated as an error here)
			throw CategoryDetailsEx();
		}
		// Fetch all categories
		ResultSet::RowIterator it = allCategories->firstRow();
		do
		{
			ResultSet::Row* row = *it;
			if (!row)
			{
				throw CategoryDetailsEx();
			}
			ResultSet::ColumnValue* key = row->getColumn("key");
			ResultSet::ColumnValue* description = row->getColumn("description");
			ConfigCategoryDescription *value =
				new ConfigCategoryDescription(key->getString(),
							      description->getString());
			// Add current row data to categories (takes ownership)
			categories.addCategoryDescription(value);
		} while (!allCategories->isLastRow(it++));

		// Free result set
		delete allCategories;

		// Return object
		return categories;
	}
	// This codebase throws some exceptions by pointer: delete them here
	catch (std::exception* e)
	{
		delete e;
		if (allCategories)
		{
			// Free result set
			delete allCategories;
		}
		throw CategoryDetailsEx();
	}
	catch (...)
	{
		if (allCategories)
		{
			// Free result set
			delete allCategories;
		}
		throw CategoryDetailsEx();
	}
}

/**
 * Return all the items of a specific category
 * from the storage layer.
 *
 * @param categoryName	The specified category name
 * @return		ConfigCategory class object
 *			with all category items
 * @throw		NoSuchCategory exception
 * @throw		ConfigCategoryEx exception
 * @throw		CategoryDetailsEx exception
 */
ConfigCategory ConfigurationManager::getCategoryAllItems(const string& categoryName) const
{
	// SELECT * FROM fledge.configuration WHERE key = categoryName
	const Condition conditionKey(Equals);
	Where *wKey = new Where("key", conditionKey, categoryName);
	Query qKey(wKey);

	ResultSet* categoryItems = 0;
	try
	{
		// Query via storage client
		categoryItems = m_storage->queryTable("configuration", qKey);
		if (!categoryItems)
		{
			throw ConfigCategoryEx();
		}
		// Category not found
		if (!categoryItems->rowCount())
		{
			throw NoSuchCategory();
		}
		// Get first row
		ResultSet::RowIterator it = categoryItems->firstRow();
		ResultSet::Row* row = *it;
		if (!row)
		{
			throw CategoryDetailsEx();
		}

		// If we have an exception catch it and free the result set
		ResultSet::ColumnValue* key = row->getColumn("key");
		ResultSet::ColumnValue* description = row->getColumn("description");
		ResultSet::ColumnValue* items = row->getColumn("value");

		// Create string representation of the "value" JSON object
		rapidjson::StringBuffer buffer;
		rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
		const rapidjson::Value *v = items->getJSON();
		v->Accept(writer);
		const string sItems(buffer.GetString(), buffer.GetSize());

		// Create category object
		ConfigCategory theVal(key->getString(), sItems);
		// Set description
		theVal.setDescription(description->getString());

		// Free result set
		delete categoryItems;

		return theVal;
	}
	// Pointer-thrown exceptions are deleted here
	catch (std::exception* e)
	{
		delete e;
		if (categoryItems)
		{
			// Free result set
			delete categoryItems;
		}
		throw ConfigCategoryEx();
	}
	catch (NoSuchCategory& e)
	{
		if (categoryItems)
		{
			// Free result set
			delete categoryItems;
		}
		throw;
	}
	catch (...)
	{
		if (categoryItems)
		{
			// Free result set
			delete categoryItems;
		}
		throw ConfigCategoryEx();
	}
}

/**
 * Create or update a new category
 *
 * @param categoryName		The category name
 * @param categoryDescription	The category description
 * @param categoryItems		The category items
 * @param keepOriginalItems	Keep stored items or replace them
 * @return			The ConfigCategory object
 *				with "value" and "default"
 *				of the new category added
 *				or the merged configuration
 *				of the updated configuration.
 * @throw			CategoryDetailsEx exception
 * @throw			ConfigCategoryEx exception
 * @throw			ConfigCategoryDefaultWithValue exception
 */
ConfigCategory ConfigurationManager::createCategory(const std::string& categoryName,
						    const std::string& categoryDescription,
						    const std::string& categoryItems,
						    bool keepOriginalItems) const
{
	// Fill the ready to insert category object with input data
	ConfigCategory preparedValue(categoryName, categoryItems);
	preparedValue.setDescription(categoryDescription);

	try
	{
		// Abort if items contain both value and default
		preparedValue.checkDefaultValuesOnly();

		// Add 'value' from 'default' for each item
		preparedValue.setItemsValueFromDefault();
	}
	catch (ConfigMalformed* e)
	{
		delete e;
		throw ConfigCategoryEx();
	}
	catch (ConfigValueFoundWithDefault* e)
	{
		// The category items have both default and value properties:
		// raise the specific ConfigCategoryDefaultWithValue exception
		delete e;
		// Raise specific exception
		throw ConfigCategoryDefaultWithValue();
	}
	catch (std::exception* e)
	{
		delete e;
		throw ConfigCategoryEx();
	}
	catch (...)
	{
		throw ConfigCategoryEx();
	}

	// Parse JSON input
	Document doc;
	// Parse the prepared input category with "value" and "default"
	doc.Parse(preparedValue.itemsToJSON().c_str());
	if (doc.HasParseError())
	{
		throw ConfigCategoryEx();
	}

	// Set the JSON string for merged category values
	string updatedItems;

	// SELECT * FROM fledge.configuration WHERE key = categoryName
	const Condition conditionKey(Equals);
	Where *wKey = new Where("key", conditionKey, categoryName);
	Query qKey(wKey);

	ResultSet* result = 0;
	try
	{
		// Query via storage client
		result = m_storage->queryTable("configuration", qKey);
		if (!result)
		{
			throw ConfigCategoryEx();
		}
		if (!result->rowCount())
		{
			// Category does not exist yet: INSERT it
			// Prepare insert values for insertTable
			InsertValues newCategory;
			newCategory.push_back(InsertValue("key", categoryName));
			newCategory.push_back(InsertValue("description", categoryDescription));
			// Set "value" field for insert using the JSON document object
			newCategory.push_back(InsertValue("value", doc));

			// Do the insert
			if (!m_storage->insertTable("configuration", newCategory))
			{
				throw ConfigCategoryEx();
			}
		}
		else
		{
			// The category already exists: fetch data
			ResultSet::RowIterator it = result->firstRow();
			ResultSet::Row* row = *it;
			if (!row)
			{
				throw CategoryDetailsEx();
			}

			// Get current category items
			ResultSet::ColumnValue* theItems = row->getColumn("value");
			const Value* storedData = theItems->getJSON();

			// Prepare for merge
			Document::AllocatorType& allocator = doc.GetAllocator();
			Value inputValues = doc.GetObject();

			/**
			 * Merge input data with stored data:
			 * stored configuration items are merged or replaced
			 * accordingly to keepOriginalItems parameter value.
			 *
			 * Items "value" are preserved for items being updated, only "default" values
			 * are overwritten.
			 */
			mergeCategoryValues(inputValues, storedData, allocator, keepOriginalItems);

			// Create the new JSON string representation of merged category items
			rapidjson::StringBuffer buffer;
			rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);

			// inputValues is the merged configuration
			inputValues.Accept(writer);

			// Set the JSON string with updated items
			updatedItems = string(buffer.GetString(), buffer.GetSize());

			// Prepare WHERE key = categoryName
			const Condition conditionKey(Equals);
			Where wKey("key", conditionKey, categoryName);

			// Prepare insert values for updateTable
			InsertValues updateCategoryValues;
			updateCategoryValues.push_back(InsertValue("key", categoryName));
			updateCategoryValues.push_back(InsertValue("description", categoryDescription));
			// Add the "value" DB field for UPDATE (inputValues with merged data)
			updateCategoryValues.push_back(InsertValue("value", inputValues));

			// Perform UPDATE fledge.configuration SET value = x WHERE key = y
			if (!m_storage->updateTable("configuration", updateCategoryValues, wKey))
			{
				throw ConfigCategoryEx();
			}
		}

		bool returnNew = result->rowCount() == 0;

		// Free result set data
		// NOTE(review): 'result' is not nulled after this delete; if the
		// return below throws, the catch handlers delete it again — same
		// stale-pointer pattern fixed in addChildCategory.
		delete result;

		if (returnNew)
		{
			// Return the new created category
			return preparedValue;
		}
		else
		{
			// Return the updated/merged category
			ConfigCategory returnValue(categoryName, updatedItems);
			returnValue.setDescription(categoryDescription);
			return returnValue;
		}
	}
	catch (std::exception* e)
	{
		delete e;
		if (result)
		{
			// Free result set
			delete result;
		}
		throw ConfigCategoryEx();
	}
	catch (...)
	{
		if (result)
		{
			// Free result set
			delete result;
		}
		throw ConfigCategoryEx();
	}
}

/**
 * Merge the input data with stored data:
 *
 * The stored configuration items are merged with new ones if
 * parameter keepOriginalItems is true otherwise they are replaced.
 *
 * The configuration items "value" objects are preserved
 * for the item names being updated, only the "default" values
 * are overwritten.
 *
 * Examples:
 * "value" : {"item_1" : { "description" : "B", "type" : "string", "default" : "TWO" }
 *            "item_7": { "description" : "Z", "type" : "string", "default" : "SEVEN" }}
 *
 * If "item_1" exists with "value" ONE and "default" ONE, the result is:
 * "value" : ONE, "default" : "TWO"
 * other fields in "item_1" are overwritten and any other item removed.
 *
 * if "item_1" doesn't exist and current data is
 * "value" : {"item_0" : { "description" : "A", "type" : "string", "default" : "NONE" },
 *            "item_7": { "description" : "Z", "type" : "string", "default" : "SEVEN" }}
 * that entry is completely replaced by the new one "value" : {"item_1" : { ...}}
 *
 * @param inputValues		New input configuration items (modified in place)
 * @param storedValues		Current stored items in storage layer
 * @param allocator		Allocator of the document owning inputValues
 * @param keepOriginalItems	Keep stored items or replace them
 * @throw			NotSupportedDataType exception
 */
void ConfigurationManager::mergeCategoryValues(Value& inputValues,
					       const Value* storedValues,
					       Document::AllocatorType& allocator,
					       bool keepOriginalItems) const
{
	// Loop through input data
	// For each item fetch the value of the stored one, if existent
	for (Value::MemberIterator itr = inputValues.MemberBegin(); itr != inputValues.MemberEnd(); ++itr)
	{
		// Get current item name
		string itemName = itr->name.GetString();

		// Find the itemName "value" in the stored data
		Value::ConstMemberIterator storedItr = storedValues->FindMember(itemName.c_str());

		if (storedItr != storedValues->MemberEnd() &&
		    storedItr->value.IsObject())
		{
			// Item name is present in stored data
			// 1. Remove current "value"
			itr->value.EraseMember("value");

			// 2. Get itemName "value" in stored data
			auto& v = storedItr->value.GetObject()["value"];
			Value object;

			// 3. Set new value: keep the stored "value" (as string),
			//    everything else comes from the input item
			switch (v.GetType())
			{
				// String
				case (kStringType):
				{
					object.SetString(v.GetString(), allocator);
					itr->value.AddMember("value", object, allocator);
					break;
				}
				// Object: serialised to its JSON string representation
				case (kObjectType):
				{
					rapidjson::StringBuffer strbuf;
					rapidjson::Writer<rapidjson::StringBuffer> writer(strbuf);
					v.Accept(writer);
					object.SetString(strbuf.GetString(), allocator);
					itr->value.AddMember("value", object, allocator);
					break;
				}
				// Array and numbers not supported yet
				default:
				{
					throw NotSupportedDataType();
					break;
				}
			}
		}
	}

	// Add stored items not found in input items only if we want to keep them.
	if (keepOriginalItems == true)
	{
		Value::ConstMemberIterator itr;
		// Loop through stored data
		for (itr = storedValues->MemberBegin(); itr != storedValues->MemberEnd(); ++itr )
		{
			string itemName = itr->name.GetString();

			// Find the itemName in the input data
			Value::MemberIterator inputItr = inputValues.FindMember(itemName.c_str());
			if (inputItr == inputValues.MemberEnd())
			{
				// Set item name
				Value name(itemName.c_str(), allocator);
				Value object;
				object.SetObject();
				// Object copy
				object.CopyFrom(itr->value, allocator);

				// Add the new object
				inputValues.AddMember(name, object, allocator);
			}
		}
	}
}

/**
 * Get a given item within a given category
 * @param categoryName	The given category
 * @param itemName	The given item
 * @return		JSON string with item details
 */
string ConfigurationManager::getCategoryItem(const string& categoryName,
					     const string& itemName) const
{
	ConfigCategory allItems = this->getCategoryAllItems(categoryName);
	return allItems.itemToJSON(itemName);
}

/**
 * Get the value of a given item within a given category
 * @param categoryName	The given category
 * @param itemName	The given item
 * @return		string with item value
 * @throw		NoSuchCategoryItemValue exception
 */
string ConfigurationManager::getCategoryItemValue(const string& categoryName,
						  const string& itemName) const
{
	try
	{
		ConfigCategory allItems = this->getCategoryAllItems(categoryName);
		return allItems.getValue(itemName);
	}
	catch (std::exception* e)
	{
		//catch pointer exceptions
		delete e;
		throw NoSuchCategoryItemValue();
	}
	catch (...)
	{
		// General catch
		throw NoSuchCategoryItemValue();
	}
}

/**
 * Set the "value" entry of a given item within a given category.
 *
 * @param categoryName	The given category
 * @param itemName	The given item
 * @param newValue	The "value" entry to set
 * @return		True on success.
 *			False on DB update error or storage layer exception
 *
 * @throw		NoSuchCategoryItem exception
 *			if categoryName/itemName doesn't exist
 */
bool ConfigurationManager::setCategoryItemValue(const std::string& categoryName,
						const std::string& itemName,
						const std::string& newValue) const
{
	// Fetch itemName from categoryName
	string currentItemValue;
	try
	{
		currentItemValue = this->getCategoryItemValue(categoryName, itemName);
	}
	catch (...)
	{
		string errMsg("No details found for the category_name: " + categoryName);
		errMsg += " and config_item: " + itemName;
		throw NoSuchCategoryItem(errMsg);
	}

	/**
	 * Check whether newValue is the same as currentValue
	 * NOTE:
	 * Does it work if newValue represents JSON object
	 * instead of a simple value?
	 */
	if (currentItemValue.compare(newValue) == 0)
	{
		// Same value: return success
		return true;
	}

	// Prepare WHERE key = categoryName
	const Condition conditionKey(Equals);
	Where wKey("key", conditionKey, categoryName);

	// Prepare JSON properties with one string vector: itemName, value
	vector<string> jsonPaths;
	jsonPaths.push_back(itemName);
	jsonPaths.push_back("value");

	JSONProperties jsonValues;
	jsonValues.push_back(JSONProperty("value", jsonPaths, newValue));

	try
	{
		// UPDATE fledge.configuration SET value = JSON(jsonValues)
		// WHERE key = 'categoryName';
		return (!m_storage->updateTable("configuration", jsonValues, wKey)) ? false : true;
	}
	catch (std::exception* e)
	{
		delete e;
		// Return failure
		return false;
	}
	catch (...)
{ // Return failure return false; } } /** * Add child categories under a given (parent) category * * @param parentCategoryName The parent category name * @param childCategories The child categories list (JSON array) * @return The JSON string with all (old and new) child * categories of the parent category name * @throw ChildCategoriesEx exception * @throw ExistingChildCategories exception * @thow NoSuchCategory exception */ string ConfigurationManager::addChildCategory(const string& parentCategoryName, const string& childCategories) const { // Check first parent category exists try { this->getCategoryAllItems(parentCategoryName); } catch (...) { throw NoSuchCategory(); } // Parse JSON input Document doc; // Parse the prepared input category with "value" and "default" doc.Parse(childCategories.c_str()); if (doc.HasParseError()) { throw ChildCategoriesEx(); } Value& children = doc["children"]; if (!children.IsArray()) { throw ChildCategoriesEx(); } unsigned int rowsAdded = 0; ResultSet* categoryItems = 0; for (Value::ConstValueIterator itr = children.Begin(); itr != children.End(); ++itr) { if (!(*itr).IsString()) { throw ChildCategoriesEx(); } string childCategory = (*itr).GetString(); // Note: all "children" categories must exist // SELECT * FROM fledge.configuration WHERE key = categoryName const Condition conditionKey(Equals); Where *wKey = new Where("key", conditionKey, childCategory); Query qKey(wKey); try { // Query via storage client categoryItems = m_storage->queryTable("configuration", qKey); if (!categoryItems) { throw ChildCategoriesEx(); } // Child category not found. 
throw exception if (!categoryItems->rowCount()) { throw NoSuchCategory(); } // Free result set delete categoryItems; // Check whether parent/child row already exists const Condition conditionParent(Equals); // Build the parent AND child WHHERE Where *wChild = new Where("child", conditionParent, childCategory); Where *wParent = new Where("parent", conditionParent, parentCategoryName, wChild); Query qParentChild(wParent); // Query via storage client categoryItems = m_storage->queryTable("category_children", qParentChild); if (!categoryItems) { throw ChildCategoriesEx(); } // Parent/child has been found: skip the insert if (categoryItems->rowCount()) { // Free result set delete categoryItems; continue; } // Free result set delete categoryItems; // Prepare insert values for insertTable InsertValues newCategory; newCategory.push_back(InsertValue("parent", parentCategoryName)); newCategory.push_back(InsertValue("child", (*itr).GetString())); /** * Do the insert: * we don't check for failed result as we checked * parent/child presence above */ m_storage->insertTable("category_children", newCategory); // Increment counter rowsAdded++; } catch (std::exception* e) { delete e; if (categoryItems) { // Free result set delete categoryItems; } throw ChildCategoriesEx(); } catch (NoSuchCategory& e) { if (categoryItems) { // Free result set delete categoryItems; } throw; } catch (...) 
{ if (categoryItems) { // Free result set delete categoryItems; } throw ChildCategoriesEx(); } } // If no rows have been inserted, then abort if (!rowsAdded) { throw ExistingChildCategories(); } // Fetch current children of parentCategoryName; return this->fetchChildCategories(parentCategoryName); } /** * Fetch all child categories of a given parent one * @param parentCategoryName The given category name * @return JSON array string with child categories * @throw ChildCategoriesEx exception */ string ConfigurationManager::fetchChildCategories(const string& parentCategoryName) const { ostringstream currentChildCategories; // Fetch current children of parentCategoryName; // SELECT * FROM fledge.category_children WHERE parent = 'parentCategoryName' const Condition conditionCurrent(Equals); Where *wCurrent = new Where("parent", conditionCurrent, parentCategoryName); Query qCurrent(wCurrent); ResultSet* newCategories = 0; try { // Fetch all child categories newCategories = m_storage->queryTable("category_children", qCurrent); if (!newCategories) { throw ChildCategoriesEx(); } // Build ther JSON output currentChildCategories << "{ \"children\" : [ "; // If no child categories return empty array if (!newCategories->rowCount()) { delete newCategories; currentChildCategories << " ] }"; return currentChildCategories.str(); } // We have some data ResultSet::RowIterator it = newCategories->firstRow(); do { ResultSet::Row* row = *it; if (!row) { throw ChildCategoriesEx(); } // Add the child category to output result ResultSet::ColumnValue* child = row->getColumn("child"); currentChildCategories << "\""; currentChildCategories << child->getString(); currentChildCategories << "\""; if (!newCategories->isLastRow(it)) { currentChildCategories << ", "; } } while (!newCategories->isLastRow(it++)); currentChildCategories << " ] }"; // Free result set delete newCategories; // Returm child categories return currentChildCategories.str(); } catch (std::exception* e) { delete e; if 
 (newCategories)
		{
			delete newCategories;
		}
		throw ChildCategoriesEx();
	}
	catch (...)
	{
		if (newCategories)
		{
			delete newCategories;
		}
		throw ChildCategoriesEx();
	}
}

/**
 * Get all the child categories of a given category name
 *
 * @param parentCategoryName	The given category name
 * @return			A ConfigCategories object
 *				with child categories (name and description)
 * @throw			ChildCategoriesEx exception
 */
ConfigCategories ConfigurationManager::getChildCategories(const string& parentCategoryName) const
{
	ConfigCategories categories;

	try
	{
		// Fetch all child categories (JSON array string)
		string childCategories = this->fetchChildCategories(parentCategoryName);

		// Parse JSON input
		Document doc;
		doc.Parse(childCategories.c_str());
		if (doc.HasParseError() || !doc.HasMember("children"))
		{
			throw ChildCategoriesEx();
		}

		// Get child categories
		Value& children = doc["children"];
		if (!children.IsArray())
		{
			throw ChildCategoriesEx();
		}

		/**
		 * For each element fetch the category description
		 * and add the entry to ConfigCategories result
		 */
		for (Value::ConstValueIterator itr = children.Begin(); itr != children.End(); ++itr)
		{
			string categoryDesc;

			// Category name must be a string
			if (!(*itr).IsString())
			{
				throw ChildCategoriesEx();
			}
			string categoryName = (*itr).GetString();

			// Fetch description
			categoryDesc = this->getCategoryDescription(categoryName);

			ConfigCategoryDescription *value =
				new ConfigCategoryDescription(categoryName, categoryDesc);
			// Add current row data to categories (takes ownership)
			categories.addCategoryDescription(value);
		}

		// Return ConfigCategories object
		return categories;
	}
	catch (std::exception* e)
	{
		delete e;
		throw ChildCategoriesEx();
	}
	catch (...)
	{
		throw ChildCategoriesEx();
	}
}

/**
 * Get the category description of a given category
 *
 * @param categoryName	The given category
 * @return		The category description
 */
string ConfigurationManager::getCategoryDescription(const string& categoryName) const
{
	// Note:
	// Any thrown exception must be caught by the caller
	ConfigCategory currentCategory = this->getCategoryAllItems(categoryName);
	return currentCategory.getDescription();
}

/**
 * Remove the link between a child category and its parent.
 * The child becomes a root category when the link is broken.
 * Note the child category still exists after this call is made.
 *
 * @param parentCategoryName	The parent category
 * @param childCategory		The child category to remove
 * @return			JSON array string with remaining
 *				child categories
 * @throw			ChildCategoriesEx exception
 */
string ConfigurationManager::deleteChildCategory(const string& parentCategoryName,
						 const string& childCategory) const
{
	const Condition conditionParent(Equals);

	// Build the parent AND child WHERE
	Where* wChild = new Where("child", conditionParent, childCategory);
	Where* wParent = new Where("parent", conditionParent, parentCategoryName, wChild);
	Query qParentChild(wParent);

	try
	{
		// Do the delete
		int deletedRows = m_storage->deleteTable("category_children", qParentChild);
		if (deletedRows == -1)
		{
			throw ChildCategoriesEx();
		}

		return this->fetchChildCategories(parentCategoryName);
	}
	catch (std::exception* e)
	{
		delete e;
		throw ChildCategoriesEx();
	}
	catch (...)
	{
		throw ChildCategoriesEx();
	}
}

/**
 * Unset the category item value.
 *
 * @param categoryName	The category name
 * @param itemName	The item name
 * @return		JSON string of category item
 * @throw	ConfigCategoryEx exception
 * @throw	NoSuchCategoryItem exception
 */
string ConfigurationManager::deleteCategoryItemValue(const string& categoryName,
						     const string& itemName) const
{
	try
	{
		// Set the empty value
		if (!this->setCategoryItemValue(categoryName, itemName, ""))
		{
			throw ConfigCategoryEx();
		}

		// Return category item
		return this->getCategoryItem(categoryName, itemName);
	}
	catch (NoSuchCategoryItem& e)
	{
		throw;
	}
	catch (...)
	{
		throw ConfigCategoryEx();
	}
}

/**
 * Delete a category from database.
 * Also remove the link between a child category and its parent.
 *
 * @param categoryName	The category being deleted
 * @return		The remaining config categories as object
 * @throw		NoSuchCategory exception
 * @throw		ConfigCategoryEx exception
 */
ConfigCategories ConfigurationManager::deleteCategory(const string& categoryName) const
{
	const Condition conditionDelete(Equals);

	// Build WHERE key = 'categoryName'
	Where* wDelete = new Where("key", conditionDelete, categoryName);
	// Build the WHERE parent = 'categoryName'
	Where* wParent = new Where("parent", conditionDelete, categoryName);

	// DELETE from configuration
	Query qDelete(wDelete);
	// DELETE from category_children
	Query qParent(wParent);

	try
	{
		// Do the category delete
		int deletedRows = m_storage->deleteTable("configuration", qDelete);
		if (deletedRows == 0)
		{
			throw NoSuchCategory();
		}
		else
		{
			if (deletedRows == -1)
			{
				throw ConfigCategoryEx();
			}
		}

		// Do the child categories delete
		deletedRows = m_storage->deleteTable("category_children", qParent);
		if (deletedRows < 0)
		{
			throw ConfigCategoryEx();
		}
		else
		{
			return getAllCategoryNames();
		}
	}
	catch (NoSuchCategory& ex)
	{
		throw;
	}
	catch (...)
	{
		throw ConfigCategoryEx();
	}
}


================================================
FILE: C/services/core/core_management_api.cpp
================================================
/*
 * Fledge core microservice management API.
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */

#include <core_management_api.h>
#include <service_registry.h>
#include <rapidjson/document.h>
#include <rapidjson/writer.h>

using namespace std;
using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>;
using namespace rapidjson;

// Singleton instance of the core management API
CoreManagementApi *CoreManagementApi::m_instance = 0;

/**
 * Wrapper for "fake" register category interest
 *
 * TODO implement the missing functionality
 * This method is just a fake returning a fixed id to caller
 */
void registerInterestWrapper(shared_ptr<HttpServer::Response> response,
			     shared_ptr<HttpServer::Request> request)
{
	string payload("{\"id\" : \"1232abcd-8889-a568-0001-aabbccdd\"}");
	*response << "HTTP/1.1 200 OK\r\nContent-Length: " << payload.length() << "\r\n"
		  << "Content-type: application/json\r\n\r\n" << payload;
}

// Replace every occurrence of 'from' in 'str' with 'to', in place
void replaceSubstr(std::string& str,
		   const std::string& from,
		   const std::string& to)
{
	size_t start_pos = 0;
	while((start_pos = str.find(from, start_pos)) != std::string::npos)
	{
		str.replace(start_pos, from.length(), to);
		// Skip past the replacement so 'to' containing 'from' can't loop forever
		start_pos += to.length();
	}
}

/**
 * Easy wrapper for getting a specific service.
 * It is called to get storage service details:
 * example: GET /fledge/service?name=Fledge%20Storage
 *
 * Immediate utility is to get the management_port of
 * storage service when running tests.
 * TODO fully implement the getService API call
 */
void getServiceWrapper(shared_ptr<HttpServer::Response> response,
		       shared_ptr<HttpServer::Request> request)
{
	// Get QUERY STRING from request
	string queryString = request->query_string;
	size_t pos = queryString.find("name=");
	if (pos != std::string::npos)
	{
		string serviceName = queryString.substr(pos + strlen("name="));
		// replace %20 with SPACE
		/*serviceName = std::regex_replace(serviceName,
						   std::regex("%20"),
						   " ");
		*/
		// RHEL 7.6 gcc pkg "gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-36)"
		// doesn't support std:regex and std::regex_replace
		replaceSubstr(serviceName, "%20", " ");

		ServiceRegistry* registry = ServiceRegistry::getInstance();
		ServiceRecord* foundService = registry->findService(serviceName);
		string payload;
		if (foundService)
		{
			// Set JSON string with service details
			// Note: the service UUID is missing at the time being
			// TODO add all API required fields
			foundService->asJSON(payload);
		}
		else
		{
			// Return not found message
			payload = "{ \"message\": \"error: service name not found\" }";
		}

		*response << "HTTP/1.1 200 OK\r\nContent-Length: " << payload.length() << "\r\n"
			  << "Content-type: application/json\r\n\r\n" << payload;
	}
	else
	{
		string errorMsg("{ \"message\": \"error: find service by name is supported right now\" }");
		*response << "HTTP/1.1 200 OK\r\nContent-Length: " << errorMsg.length() << "\r\n"
			  << "Content-type: application/json\r\n\r\n" << errorMsg;
	}
}

/**
 * Wrapper for service registration method
 */
void registerMicroServiceWrapper(shared_ptr<HttpServer::Response> response,
				 shared_ptr<HttpServer::Request> request)
{
	CoreManagementApi *api = CoreManagementApi::getInstance();
	api->registerMicroService(response, request);
}

/**
 * Wrapper for service unregistration method
 */
void unRegisterMicroServiceWrapper(shared_ptr<HttpServer::Response> response,
				   shared_ptr<HttpServer::Request> request)
{
	CoreManagementApi *api = CoreManagementApi::getInstance();
	api->unRegisterMicroService(response, request);
}

/**
 * Wrapper for get all categories
 */
void getAllCategoriesWrapper(shared_ptr<HttpServer::Response> response,
			     shared_ptr<HttpServer::Request> request)
{
	CoreManagementApi *api = CoreManagementApi::getInstance();
	api->getAllCategories(response, request);
}

/**
 * Wrapper for get category name
 */
void getCategoryWrapper(shared_ptr<HttpServer::Response> response,
			shared_ptr<HttpServer::Request> request)
{
	CoreManagementApi *api = CoreManagementApi::getInstance();
	api->getCategory(response, request);
}

/**
 * Wrapper for get category item
 * Also handles the special item name 'children',
 * returning child categories instead of the given item
 *
 * GET /fledge/service/category/{categoryName}/{itemName}
 * returns JSON string with item properties
 * GET /fledge/service/category/{categoryName}/children
 * returns JSON string with child categories
 */
void getCategoryItemWrapper(shared_ptr<HttpServer::Response> response,
			    shared_ptr<HttpServer::Request> request)
{
	CoreManagementApi *api = CoreManagementApi::getInstance();
	api->getCategoryItem(response, request);
}

/**
 * Wrapper for delete a category item value
 */
void deleteCategoryItemValueWrapper(shared_ptr<HttpServer::Response> response,
				    shared_ptr<HttpServer::Request> request)
{
	CoreManagementApi *api = CoreManagementApi::getInstance();
	api->deleteCategoryItemValue(response, request);
}

/**
 * Wrapper for set category item value
 */
void setCategoryItemValueWrapper(shared_ptr<HttpServer::Response> response,
				 shared_ptr<HttpServer::Request> request)
{
	CoreManagementApi *api = CoreManagementApi::getInstance();
	api->setCategoryItemValue(response, request);
}

/**
 * Wrapper for delete category
 */
void deleteCategoryWrapper(shared_ptr<HttpServer::Response> response,
			   shared_ptr<HttpServer::Request> request)
{
	CoreManagementApi *api = CoreManagementApi::getInstance();
	api->deleteCategory(response, request);
}

/**
 * Wrapper for delete child category
 */
void deleteChildCategoryWrapper(shared_ptr<HttpServer::Response> response,
shared_ptr<HttpServer::Request> request) { CoreManagementApi *api = CoreManagementApi::getInstance(); api->deleteChildCategory(response, request); } /** * Wrapper for create category */ void createCategoryWrapper(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { CoreManagementApi *api = CoreManagementApi::getInstance(); api->createCategory(response, request); } /** * Wrapper for create child categories */ void addChildCategoryWrapper(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { CoreManagementApi *api = CoreManagementApi::getInstance(); api->addChildCategory(response, request); } /** * Received a GET /fledge/service/category/{categoryName} */ void CoreManagementApi::getCategory(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { string categoryName = request->path_match[CATEGORY_NAME_COMPONENT]; // Fetch category items ConfigCategory category = m_config->getCategoryAllItems(categoryName); // Build JSON output ostringstream convert; convert << category.itemsToJSON(); // Send JSON data to client respond(response, convert.str()); } catch (NoSuchCategory& ex) { // Return proper error message this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "get category", ex.what()); } // TODO: also catch the exceptions from ConfigurationManager // and return proper message catch (exception ex) { internalError(response, ex); } } /** * Received a GET /fledge/service/category/{categoryName}/{itemName] */ void CoreManagementApi::getCategoryItem(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { string categoryName = request->path_match[CATEGORY_NAME_COMPONENT]; string itemName = request->path_match[CATEGORY_ITEM_COMPONENT]; if (itemName.compare("children") == 0) { // Fetch child categories ConfigCategories childCategories = m_config->getChildCategories(categoryName); // Send JSON data to client 
respond(response, "{ \"categories\" : " + childCategories.toJSON() + " }"); } else { // Fetch category item string categoryIitem = m_config->getCategoryItem(categoryName, itemName); // Send JSON data to client respond(response, categoryIitem); } } // Catch the exceptions from ConfigurationManager // and return proper message catch (ChildCategoriesEx& ex) { this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "get child categories", ex.what()); } catch (NoSuchCategory& ex) { this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "get category item", ex.what()); } catch (ConfigCategoryEx& ex) { this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "get category item", ex.what()); } catch (CategoryDetailsEx& ex) { this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "get category item", ex.what()); } catch (exception ex) { internalError(response, ex); } } /** * Received a GET /fledge/service/category */ void CoreManagementApi::getAllCategories(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { // Fetch all categories ConfigCategories allCategories = m_config->getAllCategoryNames(); // Build JSON output ostringstream convert; convert << "{ \"categories\" : [ "; convert << allCategories.toJSON(); convert << " ] }"; // Send JSON data to client respond(response, convert.str()); } // TODO: also catch the exceptions from ConfigurationManager // and return proper message catch (exception ex) { internalError(response, ex); } } /** * Wrapper function for the default resource call. * This is called whenever an unrecognised entry point call is received. 
*/ void defaultWrapper(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { CoreManagementApi *api = CoreManagementApi::getInstance(); api->defaultResource(response, request); } /** * Handle a bad URL endpoint call */ void CoreManagementApi::defaultResource(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string payload("{ \"error\" : \"Unsupported URL: " + request->path + "\" }"); respond(response, SimpleWeb::StatusCode::client_error_bad_request, payload); } /** * Construct a microservices management API manager class */ CoreManagementApi::CoreManagementApi(const string& name, const unsigned short port) : ManagementApi(name, port) { // Setup supported URL and HTTP methods // Services m_server->resource[REGISTER_SERVICE]["POST"] = registerMicroServiceWrapper; m_server->resource[UNREGISTER_SERVICE]["DELETE"] = unRegisterMicroServiceWrapper; m_server->resource[GET_SERVICE]["GET"] = getServiceWrapper; // Register category interest // TODO implement this, right now it's just a fake m_server->resource[REGISTER_CATEGORY_INTEREST]["POST"] = registerInterestWrapper; // Default wrapper m_server->default_resource["GET"] = defaultWrapper; m_server->default_resource["PUT"] = defaultWrapper; m_server->default_resource["POST"] = defaultWrapper; m_server->default_resource["DELETE"] = defaultWrapper; m_server->default_resource["HEAD"] = defaultWrapper; m_server->default_resource["CONNECT"] = defaultWrapper; // Set the instance m_instance = this; } /** * Return the singleton instance of the core management interface * * Note if one has not been explicitly created then this will * return 0. 
*/ CoreManagementApi *CoreManagementApi::getInstance() { return m_instance; } /** * Received a service registration request */ void CoreManagementApi::registerMicroService(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ostringstream convert; string uuid, payload, responsePayload; try { ServiceRegistry *registry = ServiceRegistry::getInstance(); payload = request->content.string(); Document doc; if (doc.Parse(payload.c_str()).HasParseError()) { } else { string name, type, protocol, address; unsigned short port, managementPort; if (doc.HasMember("name")) { name = string(doc["name"].GetString()); } if (doc.HasMember("type")) { type = string(doc["type"].GetString()); } if (doc.HasMember("address")) { address = string(doc["address"].GetString()); } if (doc.HasMember("protocol")) { protocol = string(doc["protocol"].GetString()); } if (doc.HasMember("service_port")) { port = doc["service_port"].GetUint(); } if (doc.HasMember("management_port")) { managementPort = doc["management_port"].GetUint(); } ServiceRecord *srv = new ServiceRecord(name, type, protocol, address, port, managementPort); if (!registry->registerService(srv)) { errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "register service", "Failed to register service"); return; } // Setup configuration API entry points if (type.compare("Storage") == 0) { /** * Storage layer is registered * Setup ConfigurationManager instance and URL entry points */ if (!getConfigurationManager(address, port)) { errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "ConfigurationManager", "Failed to connect to storage service"); return; } // Add Configuration Manager URL entry points setConfigurationEntryPoints(); } // Set service uuid uuid = registry->getUUID(srv); } convert << "{ \"id\" : \"" << uuid << "\", "; convert << "\"message\" : \"Service registered successfully\""; convert << " }"; responsePayload = convert.str(); respond(response, 
responsePayload); } catch (exception ex) { internalError(response, ex); } } /** * Received a service unregister request */ void CoreManagementApi::unRegisterMicroService(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ostringstream convert; try { ServiceRegistry *registry = ServiceRegistry::getInstance(); string uuid = request->path_match[UUID_COMPONENT]; if (registry->unRegisterService(uuid)) { convert << "{ \"id\" : " << uuid << ","; convert << "\"message\" : \"Service unregistered successfully\""; convert << " }"; string payload = convert.str(); respond(response, payload); } else { errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "unregister service", "Failed to unregister service"); } } catch (exception ex) { internalError(response, ex); } } /** * Send back an error response * * @param response The HTTP Response * @param statusCode The HTTP status code * @param entryPoint The entry point in the API * @param msg The actual error message */ void CoreManagementApi::errorResponse(shared_ptr<HttpServer::Response> response, SimpleWeb::StatusCode statusCode, const string& entryPoint, const string& msg) { ostringstream convert; convert << "{ \"message\" : \"" << msg << "\", "; convert << "\"entryPoint\" : \"" << entryPoint << "\" }"; respond(response, statusCode, convert.str()); } /** * Handle a exception by sending back an internal error * * @param response The HTTP response * @param ex The exception that caused the error */ void CoreManagementApi::internalError(shared_ptr<HttpServer::Response> response, const exception& ex) { string payload = "{ \"Exception\" : \""; payload = payload + string(ex.what()); payload = payload + "\" }"; Logger *logger = Logger::getLogger(); logger->error("CoreManagementApi Internal Error: %s\n", ex.what()); respond(response, SimpleWeb::StatusCode::server_error_internal_server_error, payload); } /** * HTTP response method */ void 
CoreManagementApi::respond(shared_ptr<HttpServer::Response> response, const string& payload) { *response << "HTTP/1.1 200 OK\r\nContent-Length: " << payload.length() << "\r\n" << "Content-type: application/json\r\n\r\n" << payload; } /** * HTTP response method */ void CoreManagementApi::respond(shared_ptr<HttpServer::Response> response, SimpleWeb::StatusCode statusCode, const string& payload) { *response << "HTTP/1.1 " << status_code(statusCode) << "\r\nContent-Length: " << payload.length() << "\r\n" << "Content-type: application/json\r\n\r\n" << payload; } /** * Instantiate the ConfigurationManager class * having storage service already registered * * @return True if ConfigurationManager is set * False otherwise. */ bool CoreManagementApi::getConfigurationManager(const string& address, const unsigned short port) { // Instantiate the ConfigurationManager if (!(m_config = ConfigurationManager::getInstance(address, port))) { return false; } Logger *logger = Logger::getLogger(); logger->info("Storage service is connected: %s:%d\n", address.c_str(), port); return true; } /** * Add configuration manager entry points */ void CoreManagementApi::setConfigurationEntryPoints() { // Add Configuration Manager entry points m_server->resource[GET_ALL_CATEGORIES]["GET"] = getAllCategoriesWrapper; m_server->resource[GET_CATEGORY]["GET"] = getCategoryWrapper; // This also hanles 'children' param for child categories m_server->resource[GET_CATEGORY_ITEM]["GET"] = getCategoryItemWrapper; m_server->resource[DELETE_CATEGORY_ITEM_VALUE]["DELETE"] = deleteCategoryItemValueWrapper; m_server->resource[SET_CATEGORY_ITEM_VALUE]["PUT"] = setCategoryItemValueWrapper; m_server->resource[DELETE_CATEGORY]["DELETE"] = deleteCategoryWrapper; m_server->resource[DELETE_CHILD_CATEGORY]["DELETE"] = deleteChildCategoryWrapper; m_server->resource[CREATE_CATEGORY]["POST"] = createCategoryWrapper; m_server->resource[ADD_CHILD_CATEGORIES]["POST"] = addChildCategoryWrapper; Logger *logger = 
Logger::getLogger(); logger->info("ConfigurationManager setup is done."); } /** * Received a DELETE /fledge/service/category/{categoryName}/{configItem}/value */ void CoreManagementApi::deleteCategoryItemValue(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string categoryName = request->path_match[CATEGORY_NAME_COMPONENT]; string itemName = request->path_match[CATEGORY_ITEM_COMPONENT]; string value = request->path_match[ITEM_VALUE_NAME]; try { // Unset the item value and return current updated item string updatedItem = m_config->deleteCategoryItemValue(categoryName, itemName); respond(response, updatedItem); } catch (NoSuchCategoryItem& ex) { this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "delete category item value", ex.what()); } catch (exception ex) { internalError(response, ex); } } /** * Received PUT /fledge/service/category/{categoryName}/{configItem} * Payload is {"value" : "some_data"} * Send to client the JSON string of category item properties */ void CoreManagementApi::setCategoryItemValue(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { string categoryName = request->path_match[CATEGORY_NAME_COMPONENT]; string itemName = request->path_match[CATEGORY_ITEM_COMPONENT]; string value = request->path_match[ITEM_VALUE_NAME]; // Get PUT data string payload = request->content.string(); Document doc; if (doc.Parse(payload.c_str()).HasParseError() || !doc.HasMember("value")) { // Return proper error message this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "set category item value", "failure while parsing JSON data"); } else { // TODO: it can be JSON object, tranform it to a string string theValue = doc["value"].GetString(); // Set the new value if (!m_config->setCategoryItemValue(categoryName, itemName, theValue)) { // Return proper error message this->errorResponse(response, 
SimpleWeb::StatusCode::client_error_bad_request, "set category item value", "failure while writing to storage layer"); } else { // Send JSON data this->respond(response, m_config->getCategoryItem(categoryName, itemName)); } } } catch(NoSuchCategoryItem& ex) { // Return proper error message this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "set category item value", ex.what()); } catch (exception ex) { internalError(response, ex); } } /** * Delete a config category * Received DELETE /fledge/service/category/{categoryName} * Send to client the JSON string of all remaining categories */ void CoreManagementApi::deleteCategory(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { string categoryName = request->path_match[CATEGORY_NAME_COMPONENT]; ConfigCategories updatedCategories = m_config->deleteCategory(categoryName); this->respond(response, "{ \"categories\" : " + updatedCategories.toJSON() + " }"); return; } catch (NoSuchCategory& ex) { // Return proper error message this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "delete category", ex.what()); } catch (exception ex) { internalError(response, ex); } } /** * Delete child categories of a config category * Received DELETE /fledge/service/category/{categoryName}/children/{childCategory} * Send to client the JSON string of all remaining categories */ void CoreManagementApi::deleteChildCategory(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { string categoryName = request->path_match[CATEGORY_NAME_COMPONENT]; string childCategoryName = request->path_match[CHILD_CATEGORY_COMPONENT]; // Remove selecte child cateogry fprm parent category string updatedChildren = m_config->deleteChildCategory(categoryName, childCategoryName); this->respond(response, updatedChildren); } catch (ChildCategoriesEx& ex) { // Return proper error message this->errorResponse(response, 
SimpleWeb::StatusCode::client_error_bad_request, "delete child category", ex.what()); } catch (exception ex) { internalError(response, ex); } } /** * Create a new configuration category * Received POST /fledge/service/category * * Send to client the JSON string of new category's items */ void CoreManagementApi::createCategory(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { bool keepOriginalItems = false; // Get query_string string queryString = request->query_string; size_t pos = queryString.find("keep_original_items"); if (pos != std::string::npos) { string paramValue = queryString.substr(pos + strlen("keep_original_items=")); for (auto &c: paramValue) c = tolower(c); if (paramValue.compare("true") == 0) { keepOriginalItems = true; } } // Get POST data string payload = request->content.string(); Document doc; if (doc.Parse(payload.c_str()).HasParseError() || !doc.HasMember("key") || !doc.HasMember("description") || !doc.HasMember("value") || // It must be an object !doc["value"].IsObject() || // It must be a string !doc["key"].IsString() || // It must be a string !doc["description"].IsString()) { // Return proper error message this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "create category", "failure while parsing JSON data"); return; } // Get the JSON input properties string categoryName = doc["key"].GetString(); string categoryDescription = doc["description"].GetString(); const Value& categoryItems = doc["value"]; // Create string representation of JSON object rapidjson::StringBuffer buffer; rapidjson::Writer<rapidjson::StringBuffer> writer(buffer); categoryItems.Accept(writer); const string sItems(buffer.GetString(), buffer.GetSize()); // Create the new config category ConfigCategory items = m_config->createCategory(categoryName, categoryDescription, sItems, keepOriginalItems); // Return JSON string of the new created category this->respond(response, items.toJSON()); } catch 
(ConfigCategoryDefaultWithValue& ex) { this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "create category", ex.what()); } catch (ConfigCategoryEx ex) { this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "create category", ex.what()); } catch (CategoryDetailsEx ex) { this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "create category", ex.what()); } catch (exception ex) { internalError(response, ex); } } /** * Add child categories to a given category name * Received POST /fledge/service/category/{categoryName}/children * * Send to client the JSON string with child categories */ void CoreManagementApi::addChildCategory(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { // Get categopryName string categoryName = request->path_match[CATEGORY_NAME_COMPONENT]; // Get POST data string childCategories = request->content.string(); Document doc; if (doc.Parse(childCategories.c_str()).HasParseError() || // It must be an object !doc.IsObject()) { // Return proper error message this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "add child category", "failure while parsing JSON data"); return; } // Add new child categories and return all child items JSON list this->respond(response, m_config->addChildCategory(categoryName, childCategories)); } catch (ExistingChildCategories& ex) { // Return proper error message this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "add child category", ex.what()); } catch (NoSuchCategory& ex) { // Return proper error message this->errorResponse(response, SimpleWeb::StatusCode::client_error_bad_request, "add child category", ex.what()); } catch (exception ex) { internalError(response, ex); } } ================================================ FILE: C/services/core/include/configuration_manager.h ================================================ #ifndef 
_CONFIGURATION_MANAGER_H #define _CONFIGURATION_MANAGER_H /* * Fledge Configuration management. * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <storage_client.h> #include <config_category.h> #include <string> class ConfigurationManager { public: static ConfigurationManager* getInstance(const std::string&, short unsigned int); // Called by microservice management API or the admin API: // GET /fledge/service/category // GET /fledge//category ConfigCategories getAllCategoryNames() const; // Called by microservice management API or the admin API: // GET /fledge/service/category/{category_name} // GET /fledge/category/{category_name} ConfigCategory getCategoryAllItems(const std::string& categoryName) const; // Called by microservice management API or the admin API: // POST /fledge/service/category // POST /fledge/category ConfigCategory createCategory(const std::string& categoryName, const std::string& categoryDescription, const std::string& categoryItems, bool keepOriginalIterms = false) const; // Called by microservice management API or the admin API: // GET /fledge/service/category/{categoryName}/{configItem} // GET /fledge/category/{categoryName}/{configItem} std::string getCategoryItem(const std::string& categoryName, const std::string& itemName) const; // Called by microservice management API or the admin API: // PUT /fledge/service/category/{categoryName}/{configItem} // PUT /fledge/service/{categoryName}/{configItem} bool setCategoryItemValue(const std::string& categoryName, const std::string& itemName, const std::string& newValue) const; // Called by microservice management API or the admin API: // POST /fledge/service/category/{categoryName}/children // POST /fledge/category/{categoryName}/children std::string addChildCategory(const std::string& parentCategoryName, const std::string& childCategories) const; // Called by microservice management API or the admin API: // GET 
/fledge/service/category/{categoryName}/children // GET /fledge/category/{categoryName}/children ConfigCategories getChildCategories(const std::string& parentCategoryName) const; // Called by microservice management API or the admin API: // DELETE /fledge/service/category/{CategoryName}/children/{ChildCategory} // DELETE /fledge/category/{CategoryName}/children/{ChildCategory} std::string deleteChildCategory(const std::string& parentCategoryName, const std::string& childCategory) const; // Called by microservice management API or the admin API: // DELETE /fledge/service/category/{categoryName}/{configItem}/value // DELETE /fledge/category/{categoryName}/{configItem}/value std::string deleteCategoryItemValue(const std::string& categoryName, const std::string& itemName) const; // Called by microservice management API or the admin API: // DELETE /fledge/service/category/{categoryName} // DELETE /fledge/category/{categoryName} ConfigCategories deleteCategory(const std::string& categoryName) const; // Internal usage std::string getCategoryItemValue(const std::string& categoryName, const std::string& itemName) const; private: ConfigurationManager(const std::string& host, unsigned short port); ~ConfigurationManager(); void mergeCategoryValues(rapidjson::Value& inputValues, const rapidjson::Value* storedValues, rapidjson::Document::AllocatorType& allocator, bool keepOriginalitems) const; // Internal usage std::string fetchChildCategories(const std::string& parentCategoryName) const; std::string getCategoryDescription(const std::string& categoryName) const; private: static ConfigurationManager* m_instance; StorageClient* m_storage; }; /** * NoSuchCategory */ class NoSuchCategory : public std::exception { public: virtual const char* what() const throw() { return "Config category does not exist"; } }; /** * NoSuchCategoryItemValue */ class NoSuchCategoryItemValue : public std::exception { public: virtual const char* what() const throw() { return "Failure while fetching config 
category item value"; } }; /** * NoSuchItem */ class NoSuchCategoryItem : public std::exception { public: NoSuchCategoryItem(const std::string& message) { m_error = message; } virtual const char* what() const throw() { return m_error.c_str(); } private: std::string m_error; }; /** * CategoryDetailsEx */ class CategoryDetailsEx : public std::exception { public: virtual const char* what() const throw() { return "Cannot access category informations"; } }; /** * StorageOperation */ class StorageOperation : public std::exception { public: virtual const char* what() const throw() { return "Failure while performing insert or update operation"; } }; /** * NotSupportedDataType */ class NotSupportedDataType : public std::exception { public: virtual const char* what() const throw() { return "Data type not supported"; } }; /** * AllCategoriesEx */ class AllCategoriesEx : public std::exception { public: virtual const char* what() const throw() { return "Failure while fetching all config categories"; } }; /** * ConfigCategoryDefaultWithValue */ class ConfigCategoryDefaultWithValue : public std::exception { public: virtual const char* what() const throw() { return "The config category being inserted/updated has both default and value properties for items"; } }; /** * ConfigCategoryEx */ class ConfigCategoryEx : public std::exception { public: virtual const char* what() const throw() { return "Failure while setting/fetching a config category"; } }; /** * ChildCategoriesEx */ class ChildCategoriesEx : public std::exception { public: virtual const char* what() const throw() { return "Failure while setting/fetching child categories"; } }; /** * ExistingChildCategories */ class ExistingChildCategories : public std::exception { public: virtual const char* what() const throw() { return "Requested child categories are already set for the given parent category"; } }; #endif ================================================ FILE: C/services/core/include/core_management_api.h 
================================================
#ifndef _CORE_MANAGEMENT_API_H
#define _CORE_MANAGEMENT_API_H
/*
 * Fledge core microservice management API.
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <management_api.h>
#include <configuration_manager.h>

// REST entry points of the core management API.
// The parenthesised sub-patterns are regex capture groups; the handlers
// extract them from the request path via the *_COMPONENT indexes below.
#define REGISTER_SERVICE		"/fledge/service"
#define UNREGISTER_SERVICE		"/fledge/service/([0-9A-F][0-9A-F\\-]*)"
#define GET_ALL_CATEGORIES		"/fledge/service/category"
#define CREATE_CATEGORY			GET_ALL_CATEGORIES
#define GET_CATEGORY			"/fledge/service/category/([A-Za-z][a-zA-Z_0-9]*)"
#define GET_CATEGORY_ITEM		"/fledge/service/category/([A-Za-z][a-zA-Z_0-9]*)/([A-Za-z][a-zA-Z_0-9]*)"
#define DELETE_CATEGORY_ITEM_VALUE	"/fledge/service/category/([A-Za-z][a-zA-Z_0-9]*)/([A-Za-z][a-zA-Z_0-9]*)/(value)"
#define SET_CATEGORY_ITEM_VALUE		GET_CATEGORY_ITEM
#define DELETE_CATEGORY			GET_CATEGORY
#define DELETE_CHILD_CATEGORY		"/fledge/service/category/([A-Za-z][a-zA-Z_0-9]*)/(children)/([A-Za-z][a-zA-Z_0-9]*)"
#define ADD_CHILD_CATEGORIES		"/fledge/service/category/([A-Za-z][a-zA-Z_0-9]*)/(children)"
#define REGISTER_CATEGORY_INTEREST	"/fledge/interest"	// TODO implement this, right now it's a fake.
#define GET_SERVICE			REGISTER_SERVICE

// Indexes of the regex capture groups in the paths above
#define UUID_COMPONENT			1
#define CATEGORY_NAME_COMPONENT		1
#define CATEGORY_ITEM_COMPONENT		2
#define ITEM_VALUE_NAME			3
#define CHILD_CATEGORY_COMPONENT	3

using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>;

/**
 * Management API server for a C++ microservice
 *
 * Singleton HTTP server exposing the service registration and
 * configuration category management entry points listed above.
 */
class CoreManagementApi : public ManagementApi {
	public:
		CoreManagementApi(const std::string& name,
				  const unsigned short port);
		~CoreManagementApi() {};
		static CoreManagementApi *getInstance();
		// POST /fledge/service
		void registerMicroService(std::shared_ptr<HttpServer::Response> response,
					  std::shared_ptr<HttpServer::Request> request);
		// DELETE /fledge/service/{uuid}
		void unRegisterMicroService(std::shared_ptr<HttpServer::Response> response,
					    std::shared_ptr<HttpServer::Request> request);
		// GET /fledge/service/category
		void getAllCategories(std::shared_ptr<HttpServer::Response> response,
				      std::shared_ptr<HttpServer::Request> request);
		// GET /fledge/service/category/{categoryName}
		void getCategory(std::shared_ptr<HttpServer::Response> response,
				 std::shared_ptr<HttpServer::Request> request);
		// GET /fledge/service/category/{categoryName}/{configItem}
		// GET /fledge/service/category/{categoryName}/children
		void getCategoryItem(std::shared_ptr<HttpServer::Response> response,
				     std::shared_ptr<HttpServer::Request> request);
		// DELETE /fledge/service/category/{categoryName}/{configItem}/value
		void deleteCategoryItemValue(std::shared_ptr<HttpServer::Response> response,
					     std::shared_ptr<HttpServer::Request> request);
		// PUT /fledge/service/category/{categoryName}/{configItemn}
		void setCategoryItemValue(std::shared_ptr<HttpServer::Response> response,
					  std::shared_ptr<HttpServer::Request> request);
		// Called by DELETE /fledge/service/category/{categoryName}
		void deleteCategory(std::shared_ptr<HttpServer::Response> response,
				    std::shared_ptr<HttpServer::Request> request);
		// Called by DELETE /fledge/service/category/{CategoryName}/children/{ChildCategory}
		void deleteChildCategory(std::shared_ptr<HttpServer::Response> response,
					 std::shared_ptr<HttpServer::Request> request);
		// Called by POST /fledge/service/category
		void createCategory(std::shared_ptr<HttpServer::Response> response,
				    std::shared_ptr<HttpServer::Request> request);
		// Called by POST /fledge/service/category/{categoryName}/children
		void addChildCategory(std::shared_ptr<HttpServer::Response> response,
				      std::shared_ptr<HttpServer::Request> request);
		// Default handler for unsupported URLs
		void defaultResource(std::shared_ptr<HttpServer::Response> response,
				     std::shared_ptr<HttpServer::Request> request);

	private:
		// Send an error reply with the given HTTP status code
		void errorResponse(std::shared_ptr<HttpServer::Response> response,
				   SimpleWeb::StatusCode statusCode,
				   const std::string& entryPoint,
				   const std::string& msg);
		// Send a 500 reply built from the caught exception
		void internalError(std::shared_ptr<HttpServer::Response>,
				   const std::exception&);
		void respond(std::shared_ptr<HttpServer::Response> response,
			     SimpleWeb::StatusCode statusCode,
			     const std::string& payload);
		void respond(std::shared_ptr<HttpServer::Response> response,
			     const std::string& payload);
		// Connect to the configuration manager at the given address/port
		bool getConfigurationManager(const std::string& address,
					     const unsigned short port);
		void setConfigurationEntryPoints();

	private:
		static CoreManagementApi*	m_instance;	// Singleton instance
		ConfigurationManager*		m_config;	// Configuration manager client
};
#endif

================================================
FILE: C/services/core/include/service_registry.h
================================================
#ifndef _SERVICE_REGISTRY_H
#define _SERVICE_REGISTRY_H
/*
 * Fledge service registry.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <service_record.h>
#include <vector>
#include <map>
#include <string>

/**
 * ServiceRegistry Singleton class
 *
 * Keeps the set of currently registered microservices and the
 * UUID assigned to each registration.
 */
class ServiceRegistry {
	public:
		static ServiceRegistry	*getInstance();
		bool			registerService(ServiceRecord *service);
		bool			unRegisterService(ServiceRecord *service);
		bool			unRegisterService(const std::string& uuid);
		ServiceRecord		*findService(const std::string& name);
		std::string		getUUID(ServiceRecord *service);
	private:
		ServiceRegistry();
		~ServiceRegistry();
		static ServiceRegistry		*m_instance;	// Singleton instance
		std::vector<ServiceRecord *>	m_services;	// Registered service records
		std::map<std::string, ServiceRecord *>
						m_uuids;	// UUID -> service record
};
#endif

================================================
FILE: C/services/core/service_registry.cpp
================================================
/*
 * Fledge service registry.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <service_registry.h>
#include <uuid/uuid.h>

using namespace std;

ServiceRegistry *ServiceRegistry::m_instance = 0;

/**
 * Create the service registry singleton class
 */
ServiceRegistry::ServiceRegistry()
{
}

/**
 * Destroy the service registry singleton class
 */
ServiceRegistry::~ServiceRegistry()
{
	// The registry deletes every record it still holds
	for (vector<ServiceRecord *>::iterator it = m_services.begin();
			it != m_services.end(); ++it)
	{
		delete *it;
	}
}

/**
 * Return the singleton instance of the service registry
 */
ServiceRegistry *ServiceRegistry::getInstance()
{
	if (m_instance == 0)
		m_instance = new ServiceRegistry();
	return m_instance;
}

/**
 * Register a service with the service registry
 *
 * A service re-registering with the same name, address, port and type
 * replaces the previous registration and gets a new UUID; a name clash
 * with different connection details is rejected.
 *
 * @param service	The service to register
 * @return bool		True if the service was registered
 */
bool ServiceRegistry::registerService(ServiceRecord *service)
{
	uuid_t		uuid;
	char		uuid_str[37];
	ServiceRecord	*existing;

	if ((existing = findService(service->getName())) != 0)
	{
		if (existing->getAddress().compare(service->getAddress()) ||
				existing->getType().compare(service->getType()) ||
				existing->getPort() != service->getPort())
		{
			/* Service already registered with the same name on
			 * a different address, port or type
			 */
			return false;
		}
		// Overwrite existing service
		// NOTE(review): unRegisterService() only removes the record from
		// the containers, it does not delete it, so the replaced record
		// appears to be leaked here - confirm ownership before changing.
		unRegisterService(existing);
	}
	m_services.push_back(service);
	// NOTE(review): the return value of uuid_generate_time_safe() is
	// ignored, so a non-"safe" uuid may be used silently
	uuid_generate_time_safe(uuid);
	uuid_unparse_lower(uuid, uuid_str);
	m_uuids[string(uuid_str)] = service;
	return true;
}

/**
 * Unregister a service with the service registry
 *
 * Removes the record from the service list and the uuid map;
 * the ServiceRecord object itself is not deleted here.
 *
 * @param service	The service to unregister
 * @return bool		True if the service was unregistered
 */
bool ServiceRegistry::unRegisterService(ServiceRecord *service)
{
	for (vector<ServiceRecord *>::iterator it = m_services.begin();
			it != m_services.end(); ++it)
	{
		if (*service == **it)
		{
			m_services.erase(it);
			// Also drop the matching uuid map entry
			for (map<string, ServiceRecord *>::iterator uit = m_uuids.begin();
					uit != m_uuids.end(); ++uit)
			{
				if (uit->second == service)
				{
					m_uuids.erase(uit);
					break;
				}
			}
			return true;
		}
	}
	return false;
}

/**
 * Unregister a service with the service registry
 *
 * @param uuid	The uuid of the service to unregister
 * @return bool	True if the service was unregistered
 */
bool ServiceRegistry::unRegisterService(const string& uuid)
{
	ServiceRecord *service;
	map<string, ServiceRecord *>::iterator uuidIt;

	if ((uuidIt = m_uuids.find(uuid)) == m_uuids.end())
	{
		return false;
	}
	service = m_uuids[uuid];
	for (vector<ServiceRecord *>::iterator it = m_services.begin();
			it != m_services.end(); ++it)
	{
		if (*service == **it)
		{
			m_services.erase(it);
			m_uuids.erase(uuidIt);
			return true;
		}
	}
	return false;
}

/**
 * Find a service that is registered with the service registry
 *
 * @param name	The name of the service to find
 * @return ServiceRecord*	The service record or null if not found
 */
ServiceRecord *ServiceRegistry::findService(const string& name)
{
	for (vector<ServiceRecord *>::iterator it = m_services.begin();
			it != m_services.end(); ++it)
	{
		if ((*it)->getName().compare(name) == 0)
			return *it;
	}
	return 0;
}

/**
 * Return the uuid of the
registration record for a given service * * @param service The service to return the uuid for * @return string The uud of the service registration * @throws eception If the service could not be found */ string ServiceRegistry::getUUID(ServiceRecord *service) { map<string, ServiceRecord *>::const_iterator it; for (it = m_uuids.cbegin(); it != m_uuids.cend(); ++it) { if (it->second == service) { return it->first; } } throw new exception(); } ================================================ FILE: C/services/filter-plugin-interfaces/python/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(filter-plugin-python-interface) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(DLLIB -ldl) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) # Find source files file(GLOB SOURCES python_plugin_interface.cpp) # Find Python.h 3.x dev/lib package if(${CMAKE_VERSION} VERSION_LESS "3.12.0") pkg_check_modules(PYTHON REQUIRED python3) else() find_package(Python3 REQUIRED COMPONENTS Interpreter Development) endif() # Include header files include_directories(include ../../../common/include ../../../services/common/include ../../../services/south/include ../../../thirdparty/rapidjson/include) include_directories(../../../services/common-plugin-interfaces/python/include) include_directories(../../../thirdparty/Simple-Web-Server) # Add Python 3.x header files if(${CMAKE_VERSION} VERSION_LESS "3.12.0") include_directories(${PYTHON_INCLUDE_DIRS}) else() include_directories(${Python3_INCLUDE_DIRS}) endif() if(${CMAKE_VERSION} VERSION_LESS "3.12.0") link_directories(${PYTHON_LIBRARY_DIRS}) else() link_directories(${Python3_LIBRARY_DIRS}) endif() set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) target_link_libraries(${PROJECT_NAME} ${DLLIB}) target_link_libraries(${PROJECT_NAME} 
${COMMON_LIB})
target_link_libraries(${PROJECT_NAME} ${SERVICE_COMMON_LIB})

# Add Python 3.x library
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES})
else()
    target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES})
endif()

set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1)

# Install library
install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib)

================================================
FILE: C/services/filter-plugin-interfaces/python/filter_ingest_pymodule/CMakeLists.txt
================================================
cmake_minimum_required(VERSION 2.6.0)
project(filter_ingest)

set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(DLLIB -ldl)
set(COMMON_LIB common-lib)
set(SERVICE_COMMON_LIB services-common-lib)

# Find source files
file(GLOB SOURCES ingest_callback_pymodule.cpp)

# Find Python 3.5 or higher dev/lib/interp package
#find_package(PythonInterp 3.5 REQUIRED)
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    pkg_check_modules(PYTHON REQUIRED python3)
else()
    find_package(Python3 REQUIRED COMPONENTS Interpreter Development)
endif()

# Include header files
include_directories(include)
include_directories(../../../../common/include)
include_directories(../../../../services/common/include)
include_directories(../../../../services/south/include)
include_directories(../../../../thirdparty/rapidjson/include)

# Add Python 3.x header files
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    include_directories(${PYTHON_INCLUDE_DIRS})
else()
    include_directories(${Python3_INCLUDE_DIRS})
endif()

if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    link_directories(${PYTHON_LIBRARY_DIRS})
else()
    link_directories(${Python3_LIBRARY_DIRS})
endif()

link_directories(${PROJECT_BINARY_DIR}/../../../../lib)

# The module is installed into the Fledge python tree, not into lib
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../../../../../python)

# Create shared library
add_library(${PROJECT_NAME} SHARED ${SOURCES})

# Add Python 3.x library
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES})
else()
    target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES})
endif()

target_link_libraries(${PROJECT_NAME} ${DLLIB})
target_link_libraries(${PROJECT_NAME} ${COMMON_LIB})
target_link_libraries(${PROJECT_NAME} ${SERVICE_COMMON_LIB})

set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE C)
set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1)
set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1)
# No "lib" prefix: the file must be importable as "filter_ingest"
set_target_properties(${PROJECT_NAME} PROPERTIES PREFIX "")

# Install libraries
install(TARGETS ${PROJECT_NAME} DESTINATION fledge/python)

================================================
FILE: C/services/filter-plugin-interfaces/python/filter_ingest_pymodule/ingest_callback_pymodule.cpp
================================================
/*
 * Fledge python module for filter plugin ingest callback
 *
 * Copyright (c) 2019 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <reading.h>
#include <reading_set.h>
#include <logger.h>
#include <Python.h>

#include <vector>
#include <pythonreadingset.h>

extern "C" {

// Signature of the C++ callback used to push readings onwards in the chain
typedef void (*INGEST_CB_DATA)(void *, PythonReadingSet *);

static void filter_plugin_async_ingest_fn(PyObject *ingest_callback,
					  PyObject *ingest_obj_ref_data,
					  PyObject *readingsObj);

static PyObject *IngestError;

/**
 * Implementation of data ingest into filters chain
 *
 * @param self	The python module object
 * @param args	Input arguments
 * @return	PyObject of None type
 */
static PyObject *filter_ingest_callback(PyObject *self, PyObject *args)
{
	PyObject *readingList;
	PyObject *callback;
	PyObject *ingestData;
	// Expect three objects: a capsule with the C callback, a capsule
	// with its opaque data pointer, and the list of readings
	if (!PyArg_ParseTuple(args, "OOO",
			      &callback,
			      &ingestData,
			      &readingList))
	{
		Logger::getLogger()->error("Cannot parse input arguments "
					   "of filter_ingest_callback C API module");
		return NULL;
	}

	// Invoke callback routine
	filter_plugin_async_ingest_fn(callback,
				      ingestData,
				      readingList);
Py_INCREF(Py_None); return Py_None; } static PyMethodDef FilterIngestMethods[] = { { "filter_ingest_callback", filter_ingest_callback, METH_VARARGS, "Invoke filter ingest callback" }, {NULL, NULL, 0, NULL} /* Sentinel */ }; static struct PyModuleDef filterIngestmodule = { PyModuleDef_HEAD_INIT, "filter_ingest", /* name of module */ NULL, /* module documentation, may be NULL */ -1, /* size of per-interpreter state of the module, or -1 if the module keeps state in global variables. */ FilterIngestMethods }; /** * Init the C API Python module */ PyMODINIT_FUNC PyInit_filter_ingest(void) { PyObject *m; m = PyModule_Create(&filterIngestmodule); if (m == NULL) { Logger::getLogger()->fatal("Cannot initialise filter_ingest C API module"); return NULL; } IngestError = PyErr_NewException("ingest.error", NULL, NULL); Py_INCREF(IngestError); PyModule_AddObject(m, "error", IngestError); return m; } /** * Ingest data into filters chain * * @param ingest_callback The callback routine * @param ingest_obj_ref_data Object parameter for callback routine * @param readingsObj Readongs data as PyObject */ void filter_plugin_async_ingest_fn(PyObject *ingest_callback, PyObject *ingest_obj_ref_data, PyObject *readingsObj) { if (ingest_callback == NULL || ingest_obj_ref_data == NULL || readingsObj == NULL) { Logger::getLogger()->error("PyC interface error: " "%s: " "filter_ingest_callback=%p, " "ingest_obj_ref_data=%p, " "readingsObj=%p", __FUNCTION__, ingest_callback, ingest_obj_ref_data, readingsObj); return; } PythonReadingSet *pyReadingSet = NULL; // Check we have a list of readings if (PyList_Check(readingsObj)) { try { // Get vector of Readings from Python object pyReadingSet = new PythonReadingSet(readingsObj); } catch (std::exception e) { Logger::getLogger()->warn("Unable to create a PythonReadingSet, error: %s", e.what()); pyReadingSet = NULL; } Logger::getLogger()->debug("%s:%d, pyReadingSet=%p, pyReadingSet readings count=%d", __FUNCTION__, __LINE__, pyReadingSet, 
					   pyReadingSet?pyReadingSet->getCount():0);
	}
	else
	{
		Logger::getLogger()->error("Filter did not return a Python List "
					   "but object type %s",
					   Py_TYPE(readingsObj)->tp_name);
	}

	// From: https://docs.python.org/3/c-api/arg.html
	// Note that any Python object references which are provided to the caller are borrowed references;
	// do not decrement their reference count!
	/*if(readingsObj) Py_CLEAR(readingsObj);*/

	if (pyReadingSet)
	{
		// Get callback pointer
		INGEST_CB_DATA cb = (INGEST_CB_DATA) PyCapsule_GetPointer(ingest_callback, NULL);
		// Get ingest object parameter
		void *data = PyCapsule_GetPointer(ingest_obj_ref_data, NULL);
		Logger::getLogger()->debug("%s:%d: cb function at address %p", __FUNCTION__, __LINE__, *cb);
		// Invoke callback method for ReadingSet filter ingestion
		// NOTE(review): ownership of pyReadingSet is handed to the
		// callback; nothing here deletes it - confirm the callee frees it
		(*cb)(data, pyReadingSet);
	}
	else
	{
		Logger::getLogger()->error("PyC interface: plugin_ingest_fn: "
					   "Got invalid ReadingSet while converting from PyObject");
	}
}
}; // end of extern "C" block

================================================
FILE: C/services/filter-plugin-interfaces/python/python_plugin_interface.cpp
================================================
/*
 * Fledge filter plugin interface related
 *
 * Copyright (c) 2019 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <logger.h>
#include <config_category.h>
#include <reading.h>
#include <reading_set.h>
#include <mutex>
#include <plugin_handle.h>
#include <pyruntime.h>
#include <Python.h>

#include <python_plugin_common_interface.h>
#include <reading_set.h>
#include <filter_plugin.h>
#include <pythonreadingset.h>

using namespace std;

extern "C" {

// This is a C++ ReadingSet class instance passed through
typedef ReadingSet READINGSET;

// Data handle passed to function pointer
typedef void OUTPUT_HANDLE;

// Function pointer called by "plugin_ingest" plugin method
typedef void (*OUTPUT_STREAM)(OUTPUT_HANDLE *, READINGSET *);

// Symbols implemented by the common Python plugin interface library
extern PLUGIN_INFORMATION *Py2C_PluginInfo(PyObject *);
extern void
logErrorMessage();
extern PLUGIN_INFORMATION *plugin_info_fn();
extern void plugin_shutdown_fn(PLUGIN_HANDLE);

/**
 * Function to invoke 'plugin_reconfigure' function in python plugin
 *
 * The handle is the Python object returned by plugin_init; when both the
 * handle and the Python return value are dicts, the handle content is
 * replaced in place with the returned dict.
 *
 * @param    handle     The plugin handle from plugin_init_fn
 * @param    config     The new configuration, as string
 */
static void filter_plugin_reconfigure_fn(PLUGIN_HANDLE handle,
					 const std::string& config)
{
	if (!handle)
	{
		Logger::getLogger()->fatal("plugin_handle: filter_plugin_reconfigure_fn(): "
					   "handle is NULL");
		return;
	}

	if (!pythonHandles)
	{
		// Plugin name can not be logged here
		Logger::getLogger()->error("pythonHandles map is NULL "
					   "in filter_plugin_reconfigure_fn");
		return;
	}

	// Look for Python module, handle is the key
	auto it = pythonHandles->find(handle);
	if (it == pythonHandles->end() ||
	    !it->second)
	{
		// Plugin name can not be logged here
		Logger::getLogger()->fatal("filter_plugin_reconfigure_fn(): "
					   "pModule is NULL, handle %p",
					   handle);
		return;
	}

	// We have plugin name
	string pName = it->second->m_name;

	// NOTE(review): mtx is a function-local mutex, so this lock_guard
	// cannot serialize concurrent callers - it appears to be a no-op
	std::mutex mtx;
	PyObject* pFunc;
	lock_guard<mutex> guard(mtx);
	PyGILState_STATE state = PyGILState_Ensure();

	Logger::getLogger()->debug("plugin_handle: plugin_reconfigure(): "
				   "pModule=%p, *handle=%p, plugin '%s'",
				   it->second->m_module,
				   handle,
				   pName.c_str());

	Logger::getLogger()->debug("%s:%d: calling set_loglevel_in_python_module(), loglevel=%s",
				   __FUNCTION__, __LINE__,
				   Logger::getLogger()->getMinLevel().c_str());
	// Special case: a bare "logLevel" config string only refreshes the
	// Python side logging level; plugin_reconfigure is not called
	if(config.compare("logLevel") == 0)
	{
		set_loglevel_in_python_module(it->second->m_module, it->second->m_name+" filter_plugin_reconf");
		PyGILState_Release(state);
		return;
	}

	// Fetch required method in loaded object
	pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_reconfigure");
	if (!pFunc)
	{
		Logger::getLogger()->fatal("Cannot find method 'plugin_reconfigure' "
					   "in loaded python module '%s'",
					   pName.c_str());
		PyGILState_Release(state);
		return;
	}

	if (!pFunc || !PyCallable_Check(pFunc))
	{
		// Failure
		if (PyErr_Occurred())
		{
			logErrorMessage();
		}
Logger::getLogger()->fatal("Cannot call method plugin_reconfigure " "in loaded python module '%s'", pName.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return; } Logger::getLogger()->debug("plugin_reconfigure with %s", config.c_str()); PyObject *config_dict = json_loads(config.c_str()); // Call Python method passing an object and JSON config dict PyObject* pReturn = PyObject_CallFunction(pFunc, "OO", handle, config_dict); Py_CLEAR(pFunc); // Handle returned data if (!pReturn) { Logger::getLogger()->error("Called python script method plugin_reconfigure " ": error while getting result object, plugin '%s'", pName.c_str()); logErrorMessage(); } else { Logger::getLogger()->info("%s:%d: Py_TYPE(pReturn)->tp_name=%s", __FUNCTION__, __LINE__, Py_TYPE(pReturn)->tp_name); PyObject* tmp = (PyObject *)handle; // Check current handle is Dict and pReturn is a Dict too if (PyDict_Check(tmp) && PyDict_Check(pReturn)) { // Clear Dict content PyDict_Clear(tmp); // Populate handle Dict with new data in pReturn PyDict_Update(tmp, pReturn); // Remove pReturn ojbect Py_CLEAR(pReturn); Logger::getLogger()->debug("plugin_handle: plugin_reconfigure(): " "got updated handle from python plugin=%p, plugin '%s'", handle, pName.c_str()); } else { Logger::getLogger()->error("plugin_handle: plugin_reconfigure(): " "got object type '%s' instead of Python Dict, " "python plugin=%p, plugin '%s'", Py_TYPE(pReturn)->tp_name, handle, pName.c_str()); } } PyGILState_Release(state); } /** * Ingest data into filters chain * * @param handle The plugin handle returned from plugin_init * @param data The ReadingSet data to filter */ void filter_plugin_ingest_fn(PLUGIN_HANDLE handle, READINGSET *data) { if (!handle) { Logger::getLogger()->fatal("plugin_handle: filter_plugin_ingest_fn(): " "handle is NULL"); return; } if (!pythonHandles) { // Plugin name can not be logged here Logger::getLogger()->error("pythonHandles map is NULL " "in filter_plugin_ingest_fn"); return; } auto it = 
pythonHandles->find(handle);
	if (it == pythonHandles->end() ||
	    !it->second)
	{
		// Plugin name can not be logged here
		Logger::getLogger()->fatal("plugin_handle: plugin_ingest(): "
					   "pModule is NULL");
		return;
	}

	// We have plugin name
	string pName = it->second->m_name;

	PyObject* pFunc;
	PyGILState_STATE state = PyGILState_Ensure();

	// Fetch required method in loaded object
	pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_ingest");
	if (!pFunc)
	{
		Logger::getLogger()->fatal("Cannot find 'plugin_ingest' "
					   "method in loaded python module '%s'",
					   pName.c_str());
		PyGILState_Release(state);
		return;
	}

	if (!pFunc || !PyCallable_Check(pFunc))
	{
		// Failure
		if (PyErr_Occurred())
		{
			logErrorMessage();
		}
		Logger::getLogger()->fatal("Cannot call method plugin_ingest"
					   "in loaded python module '%s'",
					   pName.c_str());
		Py_CLEAR(pFunc);
		PyGILState_Release(state);
		return;
	}

	// Call asset tracker
	// Record an asset tracking tuple for every reading passing
	// through this filter
	// int i=0;
	vector<Reading *>* readings = ((ReadingSet *)data)->getAllReadingsPtr();
	for (vector<Reading *>::const_iterator elem = readings->begin();
						      elem != readings->end();
						      ++elem)
	{
		// Logger::getLogger()->debug("Reading %d: %s", i++, (*elem)->toJSON().c_str());
		AssetTracker* atr = AssetTracker::getAssetTracker();
		if (atr)
		{
			AssetTracker::getAssetTracker()->addAssetTrackingTuple(it->second->getCategoryName(),
									       (*elem)->getAssetName(),
									       string("Filter"));
		}
	}

	Logger::getLogger()->debug("C2Py: filter_plugin_ingest_fn():L%d: data->getCount()=%d",
				   __LINE__, data->getCount());

	// Create a readingList of readings to be filtered
	PythonReadingSet *pyReadingSet = (PythonReadingSet *) data;
	PyObject* readingsList = pyReadingSet->toPython();

	PyObject* pReturn = PyObject_CallFunction(pFunc, "OO", handle, readingsList);

	Py_CLEAR(pFunc);

	// Handle returned data
	if (!pReturn)
	{
		Logger::getLogger()->error("Called python script method plugin_ingest "
					   ": error while getting result object, plugin '%s'",
					   pName.c_str());
		logErrorMessage();
	}

	// The input reading set is consumed here: the Python plugin forwards
	// filtered readings itself via the filter_ingest callback module
	data->removeAll();
	delete data;

#if 0
	// Dead code kept for reference: previous approach that rebuilt the
	// ReadingSet from the list the Python plugin returned
	PythonReadingSet *filteredReadingSet = NULL;
	if (pReturn)
	{
		// Check we have a list of readings
		if (PyList_Check(readingsList))
		{
			try
			{
				// Create ReadingSet from Python reading list
				filteredReadingSet = new PythonReadingSet(readingsList);

				// Remove input data
				data->removeAll();

				// Append filtered readings; append will empty the passed reading set as well
				data->append(filteredReadingSet);

				delete filteredReadingSet;
				filteredReadingSet = NULL;
			}
			catch (std::exception e)
			{
				Logger::getLogger()->warn("Unable to create a PythonReadingSet, error: %s", e.what());
				filteredReadingSet = NULL;
			}
		}
		else
		{
			Logger::getLogger()->error("Filter did not return a Python List "
						   "but object type %s",
						   Py_TYPE(readingsList)->tp_name);
		}
	}
#endif

	// Remove readings to dict
	Py_CLEAR(readingsList);
	// Remove CallFunction result
	Py_CLEAR(pReturn);

	// Release GIL
	PyGILState_Release(state);
}

/**
 * Initialise the plugin, called to get the plugin handle and setup the
 * output handle that will be passed to the output stream. The output stream
 * is merely a function pointer that is called with the output handle and
 * the new set of readings generated by the plugin.
 *
 * (*output)(outHandle, readings);
 *
 * Note that the plugin may not call the output stream if the result of
 * the filtering is that no readings are to be sent onwards in the chain.
 * This allows the plugin to discard data or to buffer it for aggregation
 * with data that follows in subsequent calls
* This allows the plugin to discard data or to buffer it for aggregation * with data that follows in subsequent calls * * @param config The configuration category for the filter * @param outHandle A handle that will be passed to the output stream * @param output The output stream (function pointer) to which data is passed * @return An opaque handle that is used in all subsequent calls to the plugin */ PLUGIN_HANDLE filter_plugin_init_fn(ConfigCategory* config, OUTPUT_HANDLE *outHandle, OUTPUT_STREAM output) { // Get pluginName string pName = config->getValue("plugin"); Logger::getLogger()->info("filter_plugin_init_fn(): pName=%s", pName.c_str()); if (!pythonModules) { Logger::getLogger()->error("pythonModules map is NULL " "in filter_plugin_init_fn, plugin '%s'", pName.c_str()); return NULL; } bool loadModule = false; // whether module is already loaded bool reloadModule = false; // whether module is to be loaded again bool pythonInitState = false; PythonModule *module = NULL; // Check whether plugin pName has been already loaded for (auto h = pythonHandles->begin(); h != pythonHandles->end(); ++h) { if (h->second->m_name.compare(pName) == 0) { Logger::getLogger()->info("filter_plugin_init_fn: already loaded " "a plugin with name '%s'. A new Python obj is needed", pName.c_str()); // Set Python library loaded state pythonInitState = h->second->m_init; // Set load indicator loadModule = true; break; } } if (!loadModule) { Logger::getLogger()->info("filter_plugin_init_fn: NOT already loaded " "a plugin with name '%s'. 
A new Python obj is needed", pName.c_str()); // Plugin name not previously loaded: check current Python module // pName is the key auto it = pythonModules->find(pName); if (it == pythonModules->end()) { Logger::getLogger()->info("plugin_handle: filter_plugin_init(): " "pModule not found for plugin '%s': ", pName.c_str()); // Set reload indicator reloadModule = true; } else { Logger::getLogger()->info("plugin_handle: filter_plugin_init(): " "pModule FOUND for plugin '%s': ", pName.c_str()); if (it->second && it->second->m_module) { // Just use current loaded module: no load or re-load action module = it->second; Logger::getLogger()->info("plugin_handle: filter_plugin_init(): " "module set to PythonModule object @ address %p", module); // Set Python library loaded state pythonInitState = it->second->m_init; } else { Logger::getLogger()->fatal("plugin_handle: filter_plugin_init(): " "found pModule is NULL for plugin '%s': ", pName.c_str()); return NULL; } } } Logger::getLogger()->info("filter_plugin_init_fn: loadModule=%s, reloadModule=%s", loadModule?"TRUE":"FALSE", reloadModule?"TRUE":"FALSE"); // Acquire GIL PyGILState_STATE state = PyGILState_Ensure(); // Import Python module if (loadModule || reloadModule) { string fledgePythonDir; string fledgeRootDir(getenv("FLEDGE_ROOT")); fledgePythonDir = fledgeRootDir + "/python"; // Set Python path for embedded Python 3.x // Get current sys.path - borrowed reference PyObject* sysPath = PySys_GetObject((char *)"path"); PyList_Append(sysPath, PyUnicode_FromString((char *) fledgePythonDir.c_str())); // Set sys.argv for embedded Python 3.x int argc = 2; wchar_t* argv[2]; argv[0] = Py_DecodeLocale("", NULL); argv[1] = Py_DecodeLocale(pName.c_str(), NULL); // Set script parameters PySys_SetArgv(argc, argv); Logger::getLogger()->debug("%s_plugin_init_fn, %sloading plugin '%s', ", PLUGIN_TYPE_FILTER, reloadModule ? 
"re-" : "", pName.c_str()); // Import Python script PyObject *newObj = PyImport_ImportModule(pName.c_str()); // Check for NULL if (newObj) { PythonModule* newModule; if ((newModule = new PythonModule(newObj, pythonInitState, pName, PLUGIN_TYPE_FILTER, NULL)) == NULL) { // Release lock PyGILState_Release(state); Logger::getLogger()->fatal("plugin_handle: filter_plugin_init(): " "failed to create Python module " "object, plugin '%s'", pName.c_str()); return NULL; } // Set category name newModule->setCategoryName(config->getName()); // Set module module = newModule; } else { logErrorMessage(); // Release lock PyGILState_Release(state); Logger::getLogger()->fatal("plugin_handle: filter_plugin_init(): " "failed to import plugin '%s'", pName.c_str()); return NULL; } } else { // Set category name module->setCategoryName(config->getName()); } Logger::getLogger()->info("filter_plugin_init_fn for '%s', pModule '%p', " "Python interpreter '%p', config=%s", module->m_name.c_str(), module->m_module, module->m_tState, config->itemsToJSON().c_str()); Logger::getLogger()->debug("%s:%d: calling set_loglevel_in_python_module(), loglevel=%s", __FUNCTION__, __LINE__, Logger::getLogger()->getMinLevel().c_str()); set_loglevel_in_python_module(module->m_module, module->m_name + " plugin_init"); PyObject *config_dict = json_loads(config->itemsToJSON().c_str()); // Call Python method passing an object PyObject* ingest_fn = PyCapsule_New((void *)output, NULL, NULL); PyObject* ingest_ref = PyCapsule_New((void *)outHandle, NULL, NULL); PyObject* pReturn = PyObject_CallMethod(module->m_module, "plugin_init", "OOO", config_dict, ingest_ref, ingest_fn); Py_CLEAR(ingest_ref); Py_CLEAR(ingest_fn); // Handle returned data if (!pReturn) { Logger::getLogger()->error("Called python script method plugin_init " ": error while getting result object, plugin '%s'", pName.c_str()); logErrorMessage(); } else { Logger::getLogger()->info("plugin_handle: filter_plugin_init(): " "got result object '%p', plugin 
'%s'", pReturn, pName.c_str()); } // Add the handle to handles map as key, PythonModule object as value std::pair<std::map<PLUGIN_HANDLE, PythonModule*>::iterator, bool> ret; if (pythonHandles) { // Add to handles map the PythonHandles object ret = pythonHandles->insert(pair<PLUGIN_HANDLE, PythonModule*> ((PLUGIN_HANDLE)pReturn, module)); if (ret.second) { Logger::getLogger()->debug("plugin_handle: filter_plugin_init_fn(): " "handle %p of python plugin '%s' " "added to pythonHandles map", pReturn, pName.c_str()); } else { Logger::getLogger()->error("plugin_handle: filter_plugin_init_fn(): " "failed to insert handle %p of " "python plugin '%s' to pythonHandles map", pReturn, pName.c_str()); Py_CLEAR(module->m_module); module->m_module = NULL; delete module; module = NULL; Py_CLEAR(pReturn); pReturn = NULL; } } // Release locks PyGILState_Release(state); return pReturn ? (PLUGIN_HANDLE) pReturn : NULL; } /** * Constructor for PythonPluginHandle * - Load python interpreter * - Set sys.path and sys.argv * * @param pluginName The plugin name to load * @param pluginPathName The plugin pathname * @return PyObject of loaded module */ void *PluginInterfaceInit(const char *pluginName, const char * pluginPathName) { bool initPython = true; // Set plugin name, also for methods in common-plugin-interfaces/python gPluginName = pluginName; string fledgePythonDir; string fledgeRootDir(getenv("FLEDGE_ROOT")); fledgePythonDir = fledgeRootDir + "/python"; string filtersRootPath = fledgePythonDir + string(R"(/fledge/plugins/filter/)") + string(pluginName); Logger::getLogger()->info("%s:%d:, filtersRootPath=%s", __FUNCTION__, __LINE__, filtersRootPath.c_str()); PythonRuntime::getPythonRuntime(); // Acquire GIL PyGILState_STATE state = PyGILState_Ensure(); Logger::getLogger()->info("FilterPlugin PluginInterfaceInit %s:%d: " "fledgePythonDir=%s, plugin '%s'", __FUNCTION__, __LINE__, fledgePythonDir.c_str(), pluginName); // Set Python path for embedded Python 3.x // Get current sys.path - 
borrowed reference PyObject* sysPath = PySys_GetObject((char *)"path"); PyList_Append(sysPath, PyUnicode_FromString((char *) filtersRootPath.c_str())); PyList_Append(sysPath, PyUnicode_FromString((char *) fledgePythonDir.c_str())); // Set sys.argv for embedded Python 3.5 int argc = 2; wchar_t* argv[2]; argv[0] = Py_DecodeLocale("", NULL); argv[1] = Py_DecodeLocale(pluginName, NULL); PySys_SetArgv(argc, argv); // 2) Import Python script PyObject *pModule = PyImport_ImportModule(pluginName); Logger::getLogger()->info("%s:%d: pluginName=%s, pModule=%p", __FUNCTION__, __LINE__, pluginName, pModule); // Check whether the Python module has been imported if (!pModule) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("FilterPlugin PluginInterfaceInit: " "cannot import Python module file " "from '%s', plugin '%s'", pluginPathName, pluginName); } else { std::pair<std::map<string, PythonModule*>::iterator, bool> ret; PythonModule* newModule = NULL; if (pythonModules) { // Add module into pythonModules, pluginName is the key if ((newModule = new PythonModule(pModule, initPython, string(pluginName), PLUGIN_TYPE_FILTER, NULL)) == NULL) { // Release lock PyGILState_Release(state); Logger::getLogger()->fatal("plugin_handle: filter_plugin_init(): " "failed to create Python module " "object, plugin '%s'", pluginName); return NULL; } ret = pythonModules->insert(pair<string, PythonModule*> (string(pluginName), newModule)); Logger::getLogger()->info("%s:%d: Added pair to pythonModules: <%s, %p>", __FUNCTION__, __LINE__, pluginName, newModule); } // Check result if (!pythonModules || ret.second == false) { Logger::getLogger()->fatal("%s:%d: python module " "not added to the map " "of loaded plugins, " "pModule=%p, plugin '%s', aborting.", __FUNCTION__, __LINE__, pModule, pluginName); // Cleanup Py_CLEAR(pModule); pModule = NULL; delete newModule; newModule = NULL; } else { Logger::getLogger()->debug("%s:%d: python module " "successfully loaded, " 
"pModule=%p, plugin '%s'", __FUNCTION__, __LINE__, pModule, pluginName); } } // Release locks PyGILState_Release(state); // Return new Python module or NULL return pModule; } /** * Returns function pointer that can be invoked to call '_sym' function * in python plugin * * @param _sym Symbol name * @param pName Plugin name * @return function pointer to be invoked */ void *PluginInterfaceResolveSymbol(const char *_sym, const string& pName) { string sym(_sym); if (!sym.compare("plugin_info")) return (void *) plugin_info_fn; else if (!sym.compare("plugin_init")) return (void *) filter_plugin_init_fn; else if (!sym.compare("plugin_shutdown")) return (void *) plugin_shutdown_fn; else if (!sym.compare("plugin_reconfigure")) return (void *) filter_plugin_reconfigure_fn; else if (!sym.compare("plugin_ingest")) return (void *) filter_plugin_ingest_fn; else if (!sym.compare("plugin_start")) { Logger::getLogger()->debug("FilterPluginInterface currently " "does not support 'plugin_start', plugin '%s'", pName.c_str()); return NULL; } else { Logger::getLogger()->fatal("FilterPluginInterfaceResolveSymbol can not find symbol '%s' " "in the Filter Python plugin interface library, " "loaded plugin '%s'", _sym, pName.c_str()); return NULL; } } }; // End of extern C ================================================ FILE: C/services/north/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (North) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb -DPy_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(CMAKE_CXX_FLAGS_PROFILING "-O2 -pg") set(DLLIB -ldl) set(UUIDLIB -luuid) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) set(EXEC fledge.services.north) include_directories(. 
include ../../thirdparty/Simple-Web-Server ../../thirdparty/rapidjson/include ../common/include ../../common/include) find_package(Threads REQUIRED) set(BOOST_COMPONENTS system thread) # Late 2017 TODO: remove the following checks and always use std::regex if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_BOOST_REGEX") endif() endif() find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) if(APPLE) set(OPENSSL_ROOT_DIR "/usr/local/opt/openssl") endif() file(GLOB north_src "*.cpp") link_directories(${PROJECT_BINARY_DIR}/../../lib) add_executable(${EXEC} ${north_src} ${common_src} ${services_src}) target_link_libraries(${EXEC} ${Boost_LIBRARIES}) target_link_libraries(${EXEC} ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(${EXEC} ${DLLIB}) target_link_libraries(${EXEC} ${UUIDLIB}) target_link_libraries(${EXEC} ${COMMON_LIB}) target_link_libraries(${EXEC} ${SERVICE_COMMON_LIB}) install(TARGETS ${EXEC} RUNTIME DESTINATION fledge/services) if(MSYS) #TODO: Is MSYS true when MSVC is true? target_link_libraries(${EXEC} ws2_32 wsock32) if(OPENSSL_FOUND) target_link_libraries(${EXEC} ws2_32 wsock32) endif() endif() # Set profiling flags if 'Profiling' build if(CMAKE_BUILD_TYPE STREQUAL "Profiling") message("Building in Profiling mode") set_target_properties(${EXEC} PROPERTIES COMPILE_FLAGS "${CMAKE_CXX_FLAGS_PROFILING}") # define 'PROFILING' flag used by service to change directory target_compile_definitions(${EXEC} PRIVATE PROFILING=1) set(CMAKE_SHARED_LINKED_FLAGS "${CMAKE_SHARED_LINKED_FLAGS} -O2 -pg") target_link_libraries(${EXEC} -O2 -pg) endif() ================================================ FILE: C/services/north/README.rst ================================================ .. 
|br| raw:: html <br /> ********************* Fledge North Service ********************* This is the north service of the Fledge platform written in C. This service is responsible for sending the readings data onwards to the upstream systems. The service registers with the storage service to be given any new data as it arrives. |br| |br| Building ======== The Storage service is built using cmake, to build the Storage service: :: mkdir build cd build cmake .. make This will create the executable file ``north`` service. Use the command ``make install`` to install in the default location, note you will need permission on the installation directory or use the sudo command. Pass the option *DESTDIR=* to set your own destination into which to install the Storage service. Build the plugins by going to the directory *C/plugins/north* and follow the instructions in each of the plugin directories. |br| |br| Prerequisites ============= To build the North service the machine must have installed the *cmake* system, *make* and *g++*, plus the libraries for the North plugin, e.g. the boost libraries On Ubuntu based Linux distributions these can be installed with *apt-get*: :: apt-get install libboost-dev libboost-system-dev libboost-thread-dev apt-get install cmake g++ make |br| |br| Running ======= The North service may be run in daemon mode or interactively by use of the *-d* command line argument. The North service will register with the core to allow the core to monitor the North service and to allow the North storage to find the Storage service. It assumes the core is located on the same machine. This can however be overridden by the use of the command line argument *--port=* and *--address=* to set the port and address of the core microservice. The North service will look for North plugins in the current directory or in the directory *$FLEDGE_ROOT/plugins/north*. 
|br| |br| ================================================ FILE: C/services/north/data_load.cpp ================================================ /* * Fledge North Service Data Loading. * * Copyright (c) 2020 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <data_load.h> #include <north_service.h> #define INITIAL_BLOCK_WAIT 10 #define MAX_WAIT_PERIOD 200 using namespace std; static void threadMain(void *arg) { DataLoad *dl = (DataLoad *)arg; dl->loadThread(); } /** * DataLoad Constructor * * Create and start the loading thread */ DataLoad::DataLoad(const string& name, long streamId, StorageClient *storage) : m_name(name), m_streamId(streamId), m_storage(storage), m_shutdown(false), m_readRequest(0), m_dataSource(SourceReadings), m_pipeline(NULL), m_perfMonitor(NULL), m_prefetchLimit(2), m_isolate(false), m_debuggerAttached(false), m_debuggerBufferSize(1), m_suspendIngest(false), m_steps(0) { m_blockSize = DEFAULT_BLOCK_SIZE; if (m_streamId == 0) { m_streamId = createNewStream(); } m_nextStreamUpdate = 1; m_streamUpdate = 1; m_lastFetched = getLastSentId(); m_streamSent = getLastSentId(); m_flushRequired = false; m_thread = new thread(threadMain, this); loadFilters(name); } /** * DataLoad destructor * * Shutdown and wait for the loading thread */ DataLoad::~DataLoad() { // Request the loading thread to shutdown and wait for it Logger::getLogger()->info("Data load shutdown in progress"); m_shutdown = true; m_cv.notify_all(); m_fetchCV.notify_all(); m_thread->join(); delete m_thread; if (m_pipeline) { m_pipeline->cleanupFilters(m_name); delete m_pipeline; } if (m_flushRequired) { flushLastSentId(); } // Clear out the queue of readings unique_lock<mutex> lck(m_qMutex); // Should not need to do this while (! 
m_queue.empty()) { ReadingSet *readings = m_queue.front(); delete readings; m_queue.pop_front(); } Logger::getLogger()->info("Data load shutdown complete"); } /** * External call to shutdown the north service */ void DataLoad::shutdown() { m_shutdown = true; m_cv.notify_all(); m_fetchCV.notify_all(); } /** * External call to restart the north service */ void DataLoad::restart() { shutdown(); } /** * Set the source of data for the service * * @param source The data source */ bool DataLoad::setDataSource(const string& source) { if (source.compare("statistics") == 0) m_dataSource = SourceStatistics; else if (source.compare("readings") == 0) m_dataSource = SourceReadings; else if (source.compare("audit") == 0) m_dataSource = SourceAudit; else { Logger::getLogger()->error("Unsupported source '%s' for north service '%s'", source.c_str(), m_name.c_str()); return false; } return true; } /** * The background thread that loads data from the database */ void DataLoad::loadThread() { while (!m_shutdown) { unsigned int block = waitForReadRequest(); while (m_shutdown == false && m_queue.size() < m_prefetchLimit) { // Read another block if we have less than // the prefetch limit already queued readBlock(block); } } } /** * Wait for a read request to be made. Read requests come from consumer * threads calling the triggerRead call that will cause a block of reading * data (or whatever the source of data is) to be added to the reading * buffer. * * @return int The size of the block to read */ unsigned int DataLoad::waitForReadRequest() { unique_lock<mutex> lck(m_mutex); while (m_shutdown == false && m_readRequest == 0) { m_cv.wait(lck); } unsigned int rval = m_readRequest; m_readRequest = 0; Logger::getLogger()->debug("DataLoad received read request for %d readings", rval); return rval; } /** * Trigger the loading thread to read a block of data. This is called by * any thread to request that data be added to the buffer ready for collection. 
*/ void DataLoad::triggerRead(unsigned int blockSize) { unique_lock<mutex> lck(m_mutex); m_readRequest = blockSize; m_cv.notify_all(); } /** * Read a block of readings, statistics or audit date from the storage service * * @param blockSize The number of readings to fetch */ void DataLoad::readBlock(unsigned int blockSize) { int n_waits = 0; int n_update_streamId = 0; int max_wait_count = 5; // Maximum wait counter to update streams table unsigned int waitPeriod = INITIAL_BLOCK_WAIT; if (m_suspendIngest && willStep()) { lock_guard<mutex> guard(m_suspendMutex); blockSize = m_steps; m_steps = 0; } else if (m_suspendIngest) { return; } do { ReadingSet* readings = nullptr; try { switch (m_dataSource) { case SourceReadings: // Logger::getLogger()->debug("Fetch %d readings from %d", blockSize, m_lastFetched + 1); readings = m_storage->readingFetch(m_lastFetched + 1, blockSize); break; case SourceStatistics: readings = fetchStatistics(blockSize); break; case SourceAudit: readings = fetchAudit(blockSize); break; default: Logger::getLogger()->fatal("Bad source for data to send"); break; } } catch (ReadingSetException* e) { // Ignore, the exception has been reported in the layer below // readings may contain erroneous data, clear it readings = nullptr; } catch (exception& e) { // Ignore, the exception has been reported in the layer below // readings may contain erroneous data, clear it readings = nullptr; } if (readings && readings->getCount()) { n_update_streamId = 0; m_lastFetched = readings->getLastId(); Logger::getLogger()->debug("DataLoad::readBlock(): Got %lu readings from storage client, updated m_lastFetched=%lu", readings->getCount(), m_lastFetched); bufferReadings(readings); if (m_perfMonitor) { m_perfMonitor->collect("No of waits for data", n_waits); m_perfMonitor->collect("Block utilisation %", (long)((readings->getCount() * 100) / blockSize)); } return; } else if (readings) { // Delete the empty readings set delete readings; n_update_streamId++; if 
(n_update_streamId > max_wait_count) { // Update 'last_object_id' in 'streams' table when no readings to send n_update_streamId = 0; flushLastSentId(); } } else { // Logger::getLogger()->debug("DataLoad::readBlock(): No readings available"); } if (!m_shutdown) { // TODO improve this this_thread::sleep_for(chrono::milliseconds(waitPeriod)); waitPeriod *= 2; if (waitPeriod > MAX_WAIT_PERIOD) waitPeriod = MAX_WAIT_PERIOD; n_waits++; } } while (m_shutdown == false); } /** * Fetch data from the statistics history table * * @param blockSize Number of records to fetch * @return ReadingSet* A set of readings */ ReadingSet *DataLoad::fetchStatistics(unsigned int blockSize) { const Condition conditionId(GreaterThan); // WHERE id > lastId Where* wId = new Where("id", conditionId, to_string(m_lastFetched)); vector<Returns *> columns; // Add colums and needed aliases columns.push_back(new Returns("id")); columns.push_back(new Returns("key", "asset_code")); columns.push_back(new Returns("ts")); Returns *tmpReturn = new Returns("history_ts", "user_ts"); tmpReturn->timezone("utc"); columns.push_back(tmpReturn); columns.push_back(new Returns("value")); // Build the query with fields, aliases and where Query qStatistics(columns, wId); // Set limit qStatistics.limit(blockSize); // Set sort Sort* sort = new Sort("id"); qStatistics.sort(sort); // Query the statistics_history table and get a ReadingSet result return m_storage->queryTableToReadings("statistics_history", qStatistics); } /** * Fetch data from the audit log table * * @param blockSize Number of records to fetch * @return ReadingSet* A set of readings */ ReadingSet *DataLoad::fetchAudit(unsigned int blockSize) { const Condition conditionId(GreaterThan); // WHERE id > lastId Where* wId = new Where("id", conditionId, to_string(m_lastFetched)); vector<Returns *> columns; // Add colums and needed aliases columns.push_back(new Returns("id")); columns.push_back(new Returns("code", "asset_code")); columns.push_back(new 
Returns("ts")); Returns *tmpReturn = new Returns("ts", "user_ts"); tmpReturn->timezone("utc"); columns.push_back(tmpReturn); columns.push_back(new Returns("log", "reading")); // Build the query with fields, aliases and where Query qStatistics(columns, wId); // Set limit qStatistics.limit(blockSize); // Set sort Sort* sort = new Sort("id"); qStatistics.sort(sort); // Query the audit table and get a ReadingSet result return m_storage->queryTableToReadings("log", qStatistics); } /** * Get the ID of the last reading that was sent with this service */ unsigned long DataLoad::getLastSentId() { const Condition conditionId(Equals); string streamId = to_string(m_streamId); Where* wStreamId = new Where("id", conditionId, streamId); // SELECT * FROM fledge.streams WHERE id = x Query qLastId(wStreamId); ResultSet* lastObjectId = m_storage->queryTable("streams", qLastId); if (lastObjectId != NULL && lastObjectId->rowCount()) { // Get the first row only ResultSet::RowIterator it = lastObjectId->firstRow(); // Access the element ResultSet::Row* row = *it; if (row) { // Get column value ResultSet::ColumnValue* theVal = row->getColumn("last_object"); // Set found id unsigned long rval = (unsigned long)theVal->getInteger(); delete lastObjectId; return rval; } } // Free result set delete lastObjectId; return 0; } /** * Buffer a block of readings. Called after a block of data has been * read to add that block to the queue reading for collection by the * consuming thread. 
 *
 * @param readings	The readings to buffer
 */
void DataLoad::bufferReadings(ReadingSet *readings)
{
	if (m_pipeline)
	{
		PipelineElement *firstElement = m_pipeline->getFirstFilterPlugin();
		if (firstElement)
		{
			// Check whether filters are set before calling ingest
			while (!m_pipeline->isReady())
			{
				Logger::getLogger()->warn("Ingest called before "
						"filter pipeline is ready");
				std::this_thread::sleep_for(std::chrono::milliseconds(150));
			}
			m_pipeline->execute();
			// Pass readingSet to filter chain; the pipeline takes
			// ownership of the reading set from this point
			firstElement->ingest(readings);
			m_pipeline->completeBranch();	// Main branch has completed
			m_pipeline->awaitCompletion();
			return;
		}
	}
	// No filter pipeline: queue the readings directly for the sending thread
	unique_lock<mutex> lck(m_qMutex);
	m_queue.push_back(readings);
	if (m_perfMonitor && m_perfMonitor->isCollecting())
	{
		m_perfMonitor->collect("Readings added to buffer", (long)(readings->getCount()));
		m_perfMonitor->collect("Reading sets buffered", (long)(m_queue.size()));
		unsigned long i = 0;
		for (auto& set : m_queue)
			i += set->getCount();
		m_perfMonitor->collect("Total readings buffered", (long)i);
	}
	Logger::getLogger()->debug("Buffered %d readings for north processing", readings->getCount());
	// Wake any consumer blocked in fetchReadings()
	m_fetchCV.notify_all();
}

/**
 * Fetch Readings
 *
 * Remove and return the oldest buffered reading set, optionally blocking
 * until data arrives. Triggers the loading thread to refill the buffer.
 *
 * @param wait	Boolean to determine if the call should block the calling thread
 * @return ReadingSet*	Return a block of readings from the buffer, or NULL
 */
ReadingSet *DataLoad::fetchReadings(bool wait)
{
	unique_lock<mutex> lck(m_qMutex);
	while (m_shutdown == false && m_queue.empty())
	{
		if (m_perfMonitor && m_perfMonitor->isCollecting())
		{
			m_perfMonitor->collect("No data available to fetch", 1);
		}
		// Ask the loading thread for more data before we wait
		triggerRead(m_blockSize);
		if (wait && !m_shutdown)
		{
			m_fetchCV.wait(lck);
		}
		else
		{
			return NULL;
		}
	}
	ReadingSet *rval = NULL;
	if (!m_queue.empty())
	{
		rval = m_queue.front();
		m_queue.pop_front();
	}
	if (m_queue.size() < m_prefetchLimit && m_shutdown == false)
		// Read another block if we have less than the prefetch limit already queued
	{
		triggerRead(m_blockSize);
	}
	return rval;
}

/**
 * Creates a new stream, it adds a new row into the streams table
allocating a new stream id * * @return newly created stream, 0 otherwise */ int DataLoad::createNewStream() { int streamId = 0; InsertValues streamValues; streamValues.push_back(InsertValue("description", m_name)); streamValues.push_back(InsertValue("last_object", 0)); if (m_storage->insertTable("streams", streamValues) != 1) { Logger::getLogger()->error("Failed to insert a row into the streams table"); } else { // Select the row just created, having description='process name' const Condition conditionId(Equals); Where* wName = new Where("description", conditionId, m_name); Query qName(wName); ResultSet *rows = m_storage->queryTable("streams", qName); if (rows != NULL && rows->rowCount()) { // Get the first row only ResultSet::RowIterator it = rows->firstRow(); // Access the element ResultSet::Row* row = *it; if (row) { // Get column value ResultSet::ColumnValue* theVal = row->getColumn("id"); streamId = (int)theVal->getInteger(); } } delete rows; } NorthService::getMgmtClient()->setCategoryItemValue(m_name, "streamId", to_string(streamId)); return streamId; } /** * Update the last sent ID for our stream */ void DataLoad::updateLastSentId(unsigned long id) { m_streamSent = id; m_flushRequired = true; if (m_nextStreamUpdate-- <= 0) { flushLastSentId(); m_nextStreamUpdate = m_streamUpdate; } } /** * Flush the last sent Id to the storeage layer */ void DataLoad::flushLastSentId() { const Condition condition(Equals); Where where("id", condition, to_string(m_streamId)); InsertValues lastId; lastId.push_back(InsertValue("last_object", (long)m_streamSent)); m_storage->updateTable("streams", lastId, where); } /** * Load filter plugins * * Filters found in configuration are loaded * and add to the data load class instance * * @param categoryName Configuration category name * @return True if filters were loaded and initialised * or there are no filters * False with load/init errors */ bool DataLoad::loadFilters(const string& categoryName) { 
Logger::getLogger()->info("loadFilters: categoryName=%s", categoryName.c_str()); /* * We do everything to setup the pipeline using a local FilterPipeline and then assign it * to the service m_filterPipeline once it is setup to guard against access to the pipeline * during setup. * This should not be an issue if the mutex is held, however this approach lessens the risk * in the case of this routine being called when the mutex is not held and ensure m_filterPipeline * only ever points to a fully configured filter pipeline. */ ManagementClient *management = NorthService::getMgmtClient(); lock_guard<mutex> guard(m_pipelineMutex); FilterPipeline *filterPipeline = new FilterPipeline(management, *m_storage, m_name); // Try to load filters: if (!filterPipeline->loadFilters(categoryName)) { // Return false on any error return false; } // Set up the filter pipeline bool rval = filterPipeline->setupFiltersPipeline((void *)passToOnwardFilter, (void *)pipelineEnd, this); if (rval) { m_pipeline = filterPipeline; // If we previously had a debugger attached then attach to the new pipeline if (m_debuggerAttached) { attachDebugger(); setDebuggerBuffer(m_debuggerBufferSize); } } else { Logger::getLogger()->error("Failed to setup the filter pipeline, the filters are not attached to the service"); filterPipeline->cleanupFilters(categoryName); } return rval; } /** * Pass the current readings set to the next filter in the pipeline * * Note: * This routine must be passed to all filters "plugin_init" except the last one * * Static method * * @param outHandle Pointer to next filter * @param readings Current readings set */ void DataLoad::passToOnwardFilter(OUTPUT_HANDLE *outHandle, READINGSET *readingSet) { // Get next element in the pipeline PipelineElement *next = (PipelineElement *)outHandle; // Pass readings to next filter next->ingest(readingSet); } /** * Use the current readings (they have been filtered * by all filters) * * The assumption is that one of two things has happened. 
* * 1. The filtering has all been done in place. In which case * the m_data vector is in the ReadingSet passed in here. * * 2. The filtering has created new ReadingSet in which case * the reading vector must be copied into m_data from the * ReadingSet. * * Note: * This routine must be passed to last filter "plugin_init" only * * Static method * * @param outHandle Pointer to DataLoad class * @param readingSet Filtered reading set being added to Ingest::m_data */ void DataLoad::pipelineEnd(OUTPUT_HANDLE *outHandle, READINGSET *readingSet) { DataLoad *load = (DataLoad *)outHandle; if (load->isolated()) { delete readingSet; return; } std::vector<Reading *>* vecPtr = readingSet->getAllReadingsPtr(); unsigned long lastReadingId = 0; for(auto rdngPtrItr = vecPtr->crbegin(); rdngPtrItr != vecPtr->crend(); rdngPtrItr++) { if((*rdngPtrItr)->hasId()) // only consider valid reading IDs { lastReadingId = (*rdngPtrItr)->getId(); break; } } Logger::getLogger()->debug("DataLoad::pipelineEnd(): readingSet->getCount()=%d, lastReadingId=%lu, " "load->m_lastFetched=%lu", readingSet->getCount(), lastReadingId, load->m_lastFetched); // Special case when all readings are filtered out // or new readings are appended by filter with id 0 if ((readingSet->getCount() == 0) || (lastReadingId == 0)) { Logger::getLogger()->debug("DataLoad::pipelineEnd(): updating with load->updateLastSentId(%d)", load->m_lastFetched); load->updateLastSentId(load->m_lastFetched); } unique_lock<mutex> lck(load->m_qMutex); load->m_queue.push_back(readingSet); load->m_fetchCV.notify_all(); } /** * Configuration change for one of the filters or to the pipeline. 
* * @param category The name of the configuration category * @param newConfig The new category contents */ void DataLoad::configChange(const string& category, const string& newConfig) { Logger::getLogger()->debug("DataLoad::configChange(): category=%s, newConfig=%s", category.c_str(), newConfig.c_str()); if (category == m_name) { /** * The category that has changed is the one for the north service itself. * The only items that concerns us here is the filter item that defines * the filter pipeline and the data source. If the item is the filter pipeline * we extract that item and check to see if it defines a pipeline that is * different to the one we currently have. * * If it is the filter pipeline we destroy the current pipeline and create a new one. */ ConfigCategory config("tmp", newConfig); if (config.itemExists("source")) { setDataSource(config.getValue("source")); } string newPipeline = ""; if (config.itemExists("filter")) { newPipeline = config.getValue("filter"); } { lock_guard<mutex> guard(m_pipelineMutex); if (m_pipeline) { if (newPipeline == "" || m_pipeline->hasChanged(newPipeline) == false) { Logger::getLogger()->info("DataLoad::configChange(): " "filter pipeline is not set or " "it hasn't changed"); return; } /* The new filter pipeline is different to what we have already running * So remove the current pipeline and recreate. */ Logger::getLogger()->info("DataLoad::configChange(): " "filter pipeline has changed, " "recreating filter pipeline"); m_pipeline->cleanupFilters(m_name); delete m_pipeline; m_pipeline = NULL; } } /* * We have to setup a new pipeline to match the changed configuration. * Release the lock before reloading the filters as this will acquire * the lock again */ loadFilters(category); lock_guard<mutex> guard(m_pipelineMutex); } else { /* * The category is for one fo the filters. We simply call the Filter Pipeline * instance and get it to deal with sending the configuration to the right filter. 
* This is done holding the pipeline mutex to prevent the pipeline being changed * during this call and also to hold the ingest thread from running the filters * during reconfiguration. */ Logger::getLogger()->info("DataLoad::configChange(): change to config of some filter(s)"); lock_guard<mutex> guard(m_pipelineMutex); if (m_pipeline) { m_pipeline->configChange(category, newConfig); } } } ================================================ FILE: C/services/north/data_send.cpp ================================================ /* * Fledge North Service Data Loading. * * Copyright (c) 2020, 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <data_sender.h> #include <data_load.h> #include <north_service.h> #include <reading.h> using namespace std; /** * Start the sending thread within the DataSender class * * @param data The instance of the class DataSender */ static void startSenderThread(void *data) { DataSender *sender = (DataSender *)data; sender->sendThread(); } /** * Thread to update statistics table in DB */ static void statsThread(DataSender *sender) { while (sender->isRunning()) { sender->flushStatistics(); } } /** * Constructor for the data sending class */ DataSender::DataSender(NorthPlugin *plugin, DataLoad *loader, NorthService *service) : m_plugin(plugin), m_loader(loader), m_service(service), m_shutdown(false), m_paused(false), m_sending(false), m_perfMonitor(NULL), m_repeatedFailure(0) { m_statsUpdateFails = 0; m_logger = Logger::getLogger(); // Create statistics rows if not existant if (createStats("Readings Sent", 0)) { m_statsDbEntriesCache.insert("Readings Sent"); } if (createStats(m_loader->getName(), 0)) { m_statsDbEntriesCache.insert(m_loader->getName()); } /* * Start the thread. 
Everything must be initialised
to send * @return long The ID of the last reading sent */ unsigned long DataSender::send(ReadingSet *readings) { blockPause(); uint32_t to_send = readings->getCount(); uint32_t sent = m_plugin->send(readings->getAllReadings()); releasePause(); if (to_send > 0 && sent == 0) { m_repeatedFailure++; // We had readings to send but sent known. This could be as a result // of a failed connection north or a bad configuration, we have no way // to tell. If we take no action we will continue to use lots of CPU // and load the system. We instigate a backoff strategy here to try // to keep some CPU available for other tasks if (m_repeatedFailure == FAILURE_BACKOFF_THRESHOLD) { m_service->alertFailures(); } if (m_repeatedFailure > FAILURE_BACKOFF_THRESHOLD) { m_sendBackoffTime = m_repeatedFailure * MIN_SEND_BACKOFF / FAILURE_BACKOFF_THRESHOLD; if (m_sendBackoffTime > MAX_SEND_BACKOFF) { m_sendBackoffTime = MAX_SEND_BACKOFF; } { unique_lock<mutex> lk(m_backoffMutex); m_backoffCV.wait_for(lk, chrono::milliseconds(m_sendBackoffTime)); } if (m_shutdown) { return 0; } } } else if (sent > 0) { if (m_repeatedFailure >= FAILURE_BACKOFF_THRESHOLD) { m_service->clearFailures(); } // Reset the backoff and continue at full rate m_repeatedFailure = 0; } // last few readings in the reading set may have 0 reading ID, // if they have been generated by filters on north service itself const std::vector<Reading *>& readingsVec = readings->getAllReadings(); unsigned long lastSent = 0; for (auto rdngPtrItr = readingsVec.crbegin(); rdngPtrItr != readingsVec.crend(); rdngPtrItr++) { if((*rdngPtrItr)->hasId()) // only consider readings with valid reading IDs { lastSent = (*rdngPtrItr)->getId(); break; } } // unsigned long lastSent = readings->getReadingId(sent); if (m_perfMonitor) { m_perfMonitor->collect("Readings sent", sent); m_perfMonitor->collect("Percentage readings sent", (100 * sent) / to_send); } Logger::getLogger()->debug("DataSender::send(): to_send=%d, sent=%d, lastSent=%lu", to_send, 
sent, lastSent); if (sent > 0) { // lastSent = readings->getLastId(); // Update asset tracker table/cache, if required vector<Reading *> *vec = readings->getAllReadingsPtr(); for (vector<Reading *>::iterator it = vec->begin(); it != vec->end(); ) { Reading *reading = *it; if (!reading->hasId() || reading->getId() <= lastSent) { AssetTrackingTuple tuple(m_service->getName(), m_service->getPluginName(), reading->getAssetName(), "Egress"); if (!AssetTracker::getAssetTracker()->checkAssetTrackingCache(tuple)) { AssetTracker::getAssetTracker()->addAssetTrackingTuple(tuple); m_logger->info("sendDataThread: Adding new asset tracking tuple - egress: %s", tuple.assetToString().c_str()); } // Remove current reading delete reading; reading = NULL; // Remove item and set iterator to next element it = vec->erase(it); } else { break; } } updateStatistics(sent); return lastSent; } return 0; } /** * Cause the data sender process to pause sending data until a corresponding release call is made. * * This call does not block until release is called, but does block until the current * send completes. * * Called by external classes that want to prevent interaction * with the north plugin. */ void DataSender::pause() { unique_lock<mutex> lck(m_pauseMutex); m_pauseCV.wait(lck, [this]{ return m_sending == false; }); m_paused = true; } /** * Release the paused data sender thread * * Called by external classes that want to release interaction * with thew north plugin. 
 */
void DataSender::release()
{
	{
		std::lock_guard<std::mutex> lck(m_pauseMutex);
		m_paused = false;
	}
	m_pauseCV.notify_all();
}

/**
 * Check if we have paused the sending of data
 *
 * Called before we interact with the north plugin by the
 * DataSender class
 */
void DataSender::blockPause()
{
	unique_lock<mutex> lck(m_pauseMutex);
	// Block while an external caller holds the pause
	m_pauseCV.wait(lck, [this]{ return m_paused == false; });
	m_sending = true;
}

/*
 * Release the block on pausing the sender
 *
 * Called after we interact with the north plugin by the
 * DataSender class
 */
void DataSender::releasePause()
{
	{
		std::lock_guard<std::mutex> lck(m_pauseMutex);
		m_sending = false;
	}
	m_pauseCV.notify_all();
}

/**
 * Update the sent statistics
 *
 * @param increment	Increment of the number of readings sent
 */
void DataSender::updateStatistics(uint32_t increment)
{
	lock_guard<mutex> guard(m_statsMtx);
	// Add statistics counter to the map; the service specific counter
	// and the global "Readings Sent" counter are both incremented
	m_statsPendingEntries[m_loader->getName()] += increment;
	m_statsPendingEntries["Readings Sent"] += increment;
}

/**
 * Flush statistics to storage service
 *
 * Runs on the statistics thread; each pass waits up to
 * FLUSH_STATS_INTERVAL seconds (or an early notification from the
 * destructor), snapshots the pending counters and bulk updates the
 * statistics table, creating missing rows on demand.
 */
void DataSender::flushStatistics()
{
	// Wait for FLUSH_STATS_INTERVAL seconds or receive notification
	// when shutdown is called
	unique_lock<mutex> flush(m_flushStatsMtx);
	m_statsCv.wait_for(flush, std::chrono::seconds(FLUSH_STATS_INTERVAL));
	flush.unlock();

	std::map<std::string, unsigned int> statsData;
	// Acquire m_statsMtx lock for m_statsMtx
	unique_lock<mutex> lck(m_statsMtx);
	// copy statistics map
	statsData = m_statsPendingEntries;
	// Reset statistics
	m_statsPendingEntries.clear();
	// Release lock
	lck.unlock();
	if (statsData.empty())
	{
		return;
	}
	vector<pair<ExpressionValues *, Where *>> statsUpdates;
	const Condition conditionStat(Equals);
	// Send statistics to storage service
	map<string, unsigned int>::iterator it;
	for (it = statsData.begin(); it != statsData.end(); it++)
	{
		// Prepare "WHERE key = name
		Where *nStat = new Where("key", conditionStat, it->first);
		// Prepare value = value + inc
		ExpressionValues *updateValue = new ExpressionValues;
		updateValue->push_back(Expression("value", "+", (int) it->second));
		statsUpdates.emplace_back(updateValue, nStat);
		// Check whether to create stats entry into the storage
		if (m_statsDbEntriesCache.find(it->first) == m_statsDbEntriesCache.end())
		{
			if (createStats(it->first, it->second))
			{
				m_statsDbEntriesCache.insert(it->first);
			}
		}
		Logger::getLogger()->debug("Flushing statistics '%s': %d", it->first.c_str(), it->second);
	}
	// Bulk update
	if (m_loader->getStorage())
	{
		// Do the update
		// NOTE(review): the Where/ExpressionValues objects collected in
		// statsUpdates do not appear to be freed here - confirm whether
		// updateTable takes ownership, otherwise this leaks every flush
		int rv = m_loader->getStorage()->updateTable("statistics", statsUpdates);
		// Check for errors
		if (rv < 1)
		{
			if (++m_statsUpdateFails > STATS_UPDATE_FAIL_THRESHOLD)
			{
				Logger::getLogger()->warn("Update of statistics failure has persisted, attempting recovery");
				// Drop the cache and recreate the rows we know we need
				m_statsDbEntriesCache.clear();
				// Create statistics rows if not existent
				if (createStats("Readings Sent", 0))
				{
					m_statsDbEntriesCache.insert("Readings Sent");
				}
				if (createStats(m_loader->getName(), 0))
				{
					m_statsDbEntriesCache.insert(m_loader->getName());
				}
				m_statsUpdateFails = 0;
			}
			else if (m_statsUpdateFails == 1)
			{
				Logger::getLogger()->warn("Update of statistics failed");
			}
			else
			{
				Logger::getLogger()->warn("Update of statistics still failing");
			}
		}
	}
}

/**
 * Create a row into statistic table for each statistic
 *
 * @param key	The statistics key to create
 * @param value	The statistics value
 * @return	True for created data, False for no operation or error
 */
bool DataSender::createStats(const std::string &key, unsigned int value)
{
	if (!m_loader->getStorage())
	{
		return false;
	}
	// SELECT * FROM fledge.statistics WHERE key = statistics_key
	const Condition conditionKey(Equals);
	Where *wKey = new Where("key", conditionKey, key);
	Query qKey(wKey);

	ResultSet* result = 0;
	// Query via storage client
	result = m_loader->getStorage()->queryTable("statistics", qKey);
	bool doInsert = !result->rowCount();
	delete result;
	if (!doInsert)
	{
		// Row already exists
		return true;
	}
	string description;
	if (key == m_loader->getName())
	{
		description = key + " Readings Sent";
	}
	else
	{
		description = key + " North";
	}
	InsertValues values;
	values.push_back(InsertValue("key", key));
	values.push_back(InsertValue("description", description));
	values.push_back(InsertValue("value", (long)value));
	string table = "statistics";
	if (m_loader->getStorage()->insertTable(table, values) != 1)
	{
		Logger::getLogger()->error("Failed to insert a new "\
				"row into the 'statistics' table, key '%s'", key.c_str());
		return false;
	}
	else
	{
		Logger::getLogger()->info("New row added into 'statistics' table, key '%s'", key.c_str());
		return true;
	}
	// NOTE(review): unreachable - both branches above return
	return false;
}

/**
 * Check status of dryrun flag
 *
 * @return True dryrun flag is true
 */
bool DataSender::isDryRun()
{
	return m_service->getDryRun();
}

/**
 * Notify the data sender that there has been a configuration change.
 * This is used to wake up early from the wait that is performed when
 * we have failures to send. Changing the configuration may resolve the
 * issue causing the send failure.
 */
void DataSender::configChange()
{
	lock_guard<std::mutex> lck(m_backoffMutex);
	// Wakeup any sleep sender thread
	m_backoffCV.notify_all();
}



================================================
FILE: C/services/north/include/data_load.h
================================================
#ifndef _DATA_LOAD_H
#define _DATA_LOAD_H
#include <string>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <deque>
#include <storage_client.h>
#include <reading.h>
#include <filter_pipeline.h>
#include <service_handler.h>
#include <perfmonitors.h>

#define DEFAULT_BLOCK_SIZE 100

/**
 * A class used in the North service to load data from the buffer
 *
 * This class is responsible for loading the reading from the
 * storage service and buffering them ready for the egress thread
 * to process them.
 */
class DataLoad : public ServiceHandler {
	public:
		DataLoad(const std::string& name, long streamId, StorageClient *storage);
		virtual ~DataLoad();
		// Entry point of the load thread
		void		loadThread();
		bool		setDataSource(const std::string& source);
		void		triggerRead(unsigned int blockSize);
		void		updateLastSentId(unsigned long id);
		void		flushLastSentId();
		// Fetch the next buffered block, optionally blocking until available
		ReadingSet	*fetchReadings(bool wait);
		// Filter pipeline callbacks
		static void	passToOnwardFilter(OUTPUT_HANDLE *outHandle, READINGSET* readings);
		static void	pipelineEnd(OUTPUT_HANDLE *outHandle, READINGSET* readings);
		void		shutdown();
		void		restart();
		bool		isRunning() { return !m_shutdown; };
		void		configChange(const std::string& category, const std::string& newConfig);
		void		configChildCreate(const std::string& , const std::string&, const std::string&){};
		void		configChildDelete(const std::string& , const std::string&){};
		unsigned long	getLastFetched() { return m_lastFetched; };
		void		setBlockSize(unsigned long blockSize) { m_blockSize = blockSize; };
		void		setStreamUpdate(unsigned long streamUpdate) { m_streamUpdate = streamUpdate; m_nextStreamUpdate = streamUpdate; };
		void		setPerfMonitor(PerformanceMonitor *perfMonitor) { m_perfMonitor = perfMonitor; };
		const std::string&	getName() { return m_name; };
		StorageClient	*getStorage() { return m_storage; };
		void		setPrefetchLimit(unsigned int limit) { m_prefetchLimit = limit; };
		// Debugger entry points - these all delegate to the filter pipeline
		bool		attachDebugger()
				{
					if (m_pipeline)
					{
						m_debuggerAttached = true;
						return m_pipeline->attachDebugger();
					}
					return false;
				};
		void		detachDebugger()
				{
					if (m_pipeline)
					{
						m_debuggerAttached = false;
						m_debuggerBufferSize = 1;
						m_pipeline->detachDebugger();
					}
				};
		void		setDebuggerBuffer(unsigned int size)
				{
					if (m_pipeline)
					{
						m_debuggerBufferSize = size;
						m_pipeline->setDebuggerBuffer(size);
					}
				};
		std::string	getDebuggerBuffer()
				{
					std::string rval;
					if (m_pipeline)
						rval = m_pipeline->getDebuggerBuffer();
					return rval;
				};
		void		isolate(bool isolate)
				{
					std::lock_guard<std::mutex> guard(m_isolateMutex);
					m_isolate = isolate;
				};
		bool		isolated()
				{
					std::lock_guard<std::mutex> guard(m_isolateMutex);
					return m_isolate;
				};
		bool		replayDebugger()
				{
					if (m_pipeline)
					{
						return m_pipeline->replayDebugger();
					}
					else
					{
						return false;
					}
				};
		void		suspendIngest(bool suspend)
				{
					std::lock_guard<std::mutex> guard(m_suspendMutex);
					m_suspendIngest = suspend;
					m_steps = 0;
				};
		bool		isSuspended()
				{
					std::lock_guard<std::mutex> guard(m_suspendMutex);
					return m_suspendIngest;
				};
		void		stepDebugger(unsigned int steps)
				{
					std::lock_guard<std::mutex> guard(m_suspendMutex);
					m_steps = steps;
				};
		bool		willStep()
				{
					std::lock_guard<std::mutex> guard(m_suspendMutex);
					if (m_suspendIngest && m_steps > 0)
					{
						return true;
					}
					return false;
				};
	private:
		void		readBlock(unsigned int blockSize);
		unsigned int	waitForReadRequest();
		unsigned long	getLastSentId();
		int		createNewStream();
		ReadingSet	*fetchStatistics(unsigned int blockSize);
		ReadingSet	*fetchAudit(unsigned int blockSize);
		void		bufferReadings(ReadingSet *readings);
		bool		loadFilters(const std::string& category);
	private:
		const std::string&	m_name;
		long			m_streamId;
		StorageClient		*m_storage;
		volatile bool		m_shutdown;
		std::thread		*m_thread;
		std::mutex		m_mutex;
		std::condition_variable	m_cv;
		std::condition_variable	m_fetchCV;
		unsigned int		m_readRequest;
		// Which table the service egresses from
		enum { SourceReadings, SourceStatistics, SourceAudit }
					m_dataSource;
		unsigned long		m_lastFetched;
		// Queue of fetched blocks awaiting the sender thread
		std::deque<ReadingSet *>
					m_queue;
		std::mutex		m_qMutex;
		FilterPipeline		*m_pipeline;
		std::mutex		m_pipelineMutex;
		unsigned long		m_blockSize;
		PerformanceMonitor	*m_perfMonitor;
		int			m_streamUpdate;
		unsigned long		m_streamSent;
		int			m_nextStreamUpdate;
		unsigned int		m_prefetchLimit;
		bool			m_flushRequired;
		std::mutex		m_isolateMutex;
		bool			m_isolate;
		bool			m_debuggerAttached;
		unsigned int		m_debuggerBufferSize;
		bool			m_suspendIngest;
		unsigned int		m_steps;
		std::mutex		m_suspendMutex;
};
#endif


================================================
FILE: C/services/north/include/data_sender.h
================================================
#ifndef _DATA_SENDER_H
#define _DATA_SENDER_H
#include <north_plugin.h>
#include <reading_set.h>
#include <logger.h>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <perfmonitors.h>

// NOTE(review): std::map and std::unordered_set are used below but <map>
// and <unordered_set> are not included here - relies on transitive
// includes, confirm and add the headers

// Send statistics to storage in seconds
#define FLUSH_STATS_INTERVAL		5
// Failure counter before re-recreating statistics rows
#define STATS_UPDATE_FAIL_THRESHOLD	3
// Backoff sending when we see repeated failures
#define FAILURE_BACKOFF_THRESHOLD	10	// Number of consecutive failures to trigger backoff
#define MIN_SEND_BACKOFF		50	// Min backoff in milliseconds
#define MAX_SEND_BACKOFF		60000	// Max backoff in milliseconds

class DataLoad;
class NorthService;

/**
 * The class responsible for sending the buffered readings to the
 * north plugin and maintaining the egress statistics.
 */
class DataSender {
	public:
		DataSender(NorthPlugin *plugin, DataLoad *loader, NorthService *north);
		~DataSender();
		// Entry point of the sending thread
		void		sendThread();
		void		updatePlugin(NorthPlugin *plugin) { m_plugin = plugin; };
		void		pause();
		void		release();
		void		setPerfMonitor(PerformanceMonitor *perfMonitor) { m_perfMonitor = perfMonitor; };
		bool		isRunning() { return !m_shutdown; };
		// Entry point of the statistics flushing thread
		void		flushStatistics();
		bool		isDryRun();
		void		configChange();
	private:
		void		updateStatistics(uint32_t increment);
		bool		createStats(const std::string &key, unsigned int value);
		unsigned long	send(ReadingSet *readings);
		void		blockPause();
		void		releasePause();
	private:
		NorthPlugin	*m_plugin;
		DataLoad	*m_loader;
		NorthService	*m_service;
		volatile bool	m_shutdown;
		std::thread	*m_thread;
		Logger		*m_logger;
		// Pause handshake between external callers and the sender
		bool		m_paused;
		bool		m_sending;
		std::mutex	m_pauseMutex;
		std::condition_variable	m_pauseCV;
		PerformanceMonitor	*m_perfMonitor;
		// Statistics send via thread
		std::thread	*m_statsThread;
		std::mutex	m_flushStatsMtx;
		// Statistics save map
		std::condition_variable	m_statsCv;
		std::mutex	m_statsMtx;
		std::map<std::string, unsigned int>
				m_statsPendingEntries;
		int		m_statsUpdateFails;
		// confirmed stats table entries
		std::unordered_set<std::string>
				m_statsDbEntriesCache;
		// Send failure backoff state
		unsigned int	m_repeatedFailure;
		unsigned int	m_sendBackoffTime;
		std::mutex	m_backoffMutex;
		std::condition_variable	m_backoffCV;
};
#endif
================================================
FILE: C/services/north/include/defaults.h
================================================
#ifndef _DEFAULTS_H
#define _DEFAULTS_H
/*
 * Fledge north service configuration defaults for the advanced category.
 *
 * Copyright (c) 2020 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
// Table of default advanced configuration items; currently empty,
// terminated by an all-NULL sentinel entry
static struct {
	const char	*name;
	const char	*displayName;
	const char	*description;
	const char	*type;
	const char	*value;
} defaults[] = {
	{ NULL, NULL, NULL, NULL, NULL }
};
#endif


================================================
FILE: C/services/north/include/north_api.h
================================================
#ifndef _NORTH_API_H
#define _NORTH_API_H
/*
 * Fledge north service API.
 *
 * Copyright (c) 2025 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <logger.h>
#include <server_http.hpp>

// Debugger URLs
#define DEBUG_ATTACH	"^/fledge/north/debug/attach$"
#define DEBUG_DETACH	"^/fledge/north/debug/detach$"
#define DEBUG_BUFFER	"^/fledge/north/debug/buffer$"
#define DEBUG_ISOLATE	"^/fledge/north/debug/isolate$"
#define DEBUG_SUSPEND	"^/fledge/north/debug/suspend$"
#define DEBUG_STEP	"^/fledge/north/debug/step$"
#define DEBUG_REPLAY	"^/fledge/north/debug/replay$"
#define DEBUG_STATE	"^/fledge/north/debug/state$"

class NorthService;

typedef std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Response> Response;
typedef std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Request> Request;

/**
 * The HTTP API of the north service, used for the pipeline
 * debugger entry points.
 */
class NorthApi {
	public:
		NorthApi(NorthService *);
		~NorthApi();
		unsigned short	getListenerPort();
		void		startServer();
		// Debugger entry points
		void		attachDebugger(Response response, Request request);
		void		detachDebugger(Response response, Request request);
		void		setDebuggerBuffer(Response response, Request request);
		void		getDebuggerBuffer(Response response, Request request);
		void		isolateDebugger(Response response, Request request);
		void		suspendDebugger(Response response, Request request);
		void		stepDebugger(Response response, Request request);
		void		replayDebugger(Response response, Request request);
		void		stateDebugger(Response response, Request request);
	private:
		SimpleWeb::Server<SimpleWeb::HTTP>
				*m_server;
		NorthService	*m_service;
		std::thread	*m_thread;
		Logger		*m_logger;
};
#endif


================================================
FILE: C/services/north/include/north_plugin.h
================================================
#ifndef _NORTH_PLUGIN
#define _NORTH_PLUGIN
/*
 * Fledge north service.
 *
 * Copyright (c) 2020 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <plugin.h>
#include <plugin_manager.h>
#include <config_category.h>
#include <string>
#include <reading.h>

// Ingest callback signatures
typedef void (*INGEST_CB)(void *, Reading);
typedef void (*INGEST_CB2)(void *, std::vector<Reading *>*);

/**
 * Class that represents a north plugin.
 *
 * The purpose of this class is to hide the use of the pointers into the
 * dynamically loaded plugin and wrap the interface into a class that
 * can be used directly in the north subsystem.
 *
 * This is achieved by having a set of private member variables which are
 * the pointers to the functions in the plugin, and a set of public methods
 * that will call these functions via the function pointers.
 */
class NorthPlugin : public Plugin {
	public:
		NorthPlugin(PLUGIN_HANDLE handle, const ConfigCategory& category);
		~NorthPlugin();
		uint32_t	send(const std::vector<Reading *>& readings);
		void		reconfigure(const std::string&);
		void		shutdown();
		bool		persistData() { return info->options & SP_PERSIST_DATA; };
		void		start();
		void		startData(const std::string& pluginData);
		std::string	shutdownSaveData();
		bool		hasControl() { return info->options & SP_CONTROL; };
		void		pluginRegister(bool ( *write)(char *name, char *value, ControlDestination destination, ...),
				int (* operation)(char *operation, int paramCount, char *names[], char *parameters[], ControlDestination destination, ...));
	private:
		PLUGIN_HANDLE	m_instance;
		// Entry points resolved from the dynamically loaded plugin
		uint32_t	(*pluginSendPtr)(PLUGIN_HANDLE, const std::vector<Reading *>& readings);
		void		(*pluginReconfigurePtr)(PLUGIN_HANDLE*, const std::string& newConfig);
		void		(*pluginShutdownPtr)(PLUGIN_HANDLE);
		std::string	(*pluginShutdownDataPtr)(const PLUGIN_HANDLE);
		void		(*pluginStartPtr)(PLUGIN_HANDLE);
		void		(*pluginStartDataPtr)(PLUGIN_HANDLE, const std::string& pluginData);
		void		(*pluginRegisterPtr)(PLUGIN_HANDLE handle,
				bool ( *write)(char *name, char *value, ControlDestination destination, ...),
				int (* operation)(char *operation, int paramCount, char *names[], char *parameters[], ControlDestination destination, ...));
};
#endif


================================================
FILE: C/services/north/include/north_service.h
================================================
#ifndef _NORTH_SERVICE_H
#define _NORTH_SERVICE_H
/*
 * Fledge north service.
 *
 * Copyright (c) 2020 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <logger.h>
#include <north_plugin.h>
#include <service_handler.h>
#include <storage_client.h>
#include <config_category.h>
#include <filter_plugin.h>
#include <mutex>
#include <condition_variable>
#include <audit_logger.h>
#include <perfmonitors.h>
#include <data_load.h>
#include <data_sender.h>

#define SERVICE_NAME "Fledge North"

/**
 * State bits for the north pipeline debugger
 */
#define DEBUG_ATTACHED		0x01
#define DEBUG_SUSPENDED		0x02
#define DEBUG_ISOLATED		0x04

class NorthServiceProvider;

/**
 * The NorthService class. This class is the core
 * of the service that provides north side services
 * to Fledge.
 */
class NorthService : public ServiceAuthHandler {
	public:
		NorthService(const std::string& name, const std::string& token = "");
		virtual ~NorthService();
		void			start(std::string& coreAddress, unsigned short corePort);
		void			stop();
		void			shutdown();
		void			restart();
		void			configChange(const std::string&, const std::string&);
		void			configChildCreate(const std::string& , const std::string&, const std::string&){};
		void			configChildDelete(const std::string& , const std::string&){};
		bool			isRunning() { return !m_shutdown; };
		const std::string&	getName() { return m_name; };
		const std::string&	getPluginName() { return m_pluginName; };
		void			pause();
		void			release();
		// Control write/operation entry points, dispatched to the
		// south services or the dispatcher service
		bool			write(const std::string& name, const std::string& value, const ControlDestination);
		bool			write(const std::string& name, const std::string& value, const ControlDestination, const std::string& arg);
		int			operation(const std::string& name, int paramCount, char *names[], char *parameters[], const ControlDestination);
		int			operation(const std::string& name, int paramCount, char *names[], char *parameters[], const ControlDestination, const std::string& arg);
		void			setDryRun() { m_dryRun = true; };
		bool			getDryRun() { return m_dryRun; };
		void			alertFailures();
		void			clearFailures();
		// Debugger Entry point
		bool			attachDebugger()
					{
						if (m_dataLoad)
						{
							m_debugState = DEBUG_ATTACHED;
							return m_dataLoad->attachDebugger();
						}
						return false;
					};
		void			detachDebugger()
					{
						if (m_dataLoad)
							m_dataLoad->detachDebugger();
						suspendDebugger(false);
						isolateDebugger(false);
						m_debugState = 0;
					};
		void			setDebuggerBuffer(unsigned int size)
					{
						if (m_dataLoad)
							m_dataLoad->setDebuggerBuffer(size);
					};
		std::string		getDebuggerBuffer()
					{
						if (m_dataLoad)
							return m_dataLoad->getDebuggerBuffer();
						return "";
					};
		void			suspendDebugger(bool suspend)
					{
						if (m_dataLoad)
						{
							m_dataLoad->suspendIngest(suspend);
							if (suspend)
								m_debugState |= DEBUG_SUSPENDED;
							else
								m_debugState &= ~(unsigned int)DEBUG_SUSPENDED;
						}
					};
		void			isolateDebugger(bool isolate)
					{
						if (m_dataLoad)
						{
							m_dataLoad->isolate(isolate);
							if (isolate)
								m_debugState |= DEBUG_ISOLATED;
							else
								m_debugState &= ~(unsigned int)DEBUG_ISOLATED;
						}
					};
		void			stepDebugger(unsigned int steps)
					{
						if (m_dataLoad)
							m_dataLoad->stepDebugger(steps);
					}
		bool			replayDebugger()
					{
						if (m_dataLoad)
						{
							return m_dataLoad->replayDebugger();
						}
						else
						{
							return false;
						}
					};
		std::string		debugState();
		bool			debuggerAttached()
					{
						return m_debugState & DEBUG_ATTACHED;
					}
		bool			allowDebugger()
					{
						return m_allowDebugger;
					}
	private:
		void			addConfigDefaults(DefaultConfigCategory& defaults);
		bool			loadPlugin();
		void			createConfigCategories(DefaultConfigCategory configCategory, std::string parent_name,std::string current_name);
		void			restartPlugin();
		void			updateFeatures(const ConfigCategory& category);
	private:
		std::string		controlSource();
		bool			sendToService(const std::string& southService, const std::string& name, const std::string& value);
		bool			sendToDispatcher(const std::string& path, const std::string& payload);
		DataLoad		*m_dataLoad;
		DataSender		*m_dataSender;
		NorthPlugin		*northPlugin;
		std::string		m_pluginName;
		Logger			*logger;
		AssetTracker		*m_assetTracker;
		volatile bool		m_shutdown;
		ConfigCategory		m_config;
		ConfigCategory		m_configAdvanced;
		StorageClient		*m_storage;
		std::mutex		m_mutex;
		std::condition_variable	m_cv;
		PluginData		*m_pluginData;
		bool			m_restartPlugin;
		const std::string	m_token;
		bool			m_allowControl;
		bool			m_dryRun;
		bool			m_requestRestart;
		AuditLogger		*m_auditLogger;
		PerformanceMonitor	*m_perfMonitor;
		unsigned int		m_debugState;
		NorthServiceProvider	*m_provider;
		bool			m_allowDebugger;
};

/**
 *
 * A data provider class to return data in the north service ping response
 */
class NorthServiceProvider : public JSONProvider {
	public:
		NorthServiceProvider(NorthService *north) : m_north(north) {};
		virtual ~NorthServiceProvider() {};
		// Append the debugger state to the ping response payload
		void asJSON(std::string &json) const
		{
			if (m_north)
			{
				json = "\"debug\" : " + m_north->debugState();
			}
		};
	private:
		NorthService	*m_north;
};
#endif


================================================
FILE: C/services/north/north.cpp
================================================
/*
 * Fledge north service.
 *
 * Copyright (c) 2020 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <sys/timerfd.h>
#include <time.h>
#include <stdint.h>
#include <stdlib.h>
#include <signal.h>
#include <execinfo.h>
#include <dlfcn.h>    // for dladdr
#include <cxxabi.h>   // for __cxa_demangle
#include <unistd.h>
#include <north_service.h>
#include <north_api.h>
#include <management_api.h>
#include <storage_client.h>
#include <service_record.h>
#include <plugin_manager.h>
#include <plugin_api.h>
#include <plugin.h>
#include <logger.h>
#include <reading.h>
#include <data_load.h>
#include <data_sender.h>
#include <iostream>
#include <defaults.h>
#include <filter_plugin.h>
#include <config_handler.h>
#include <syslog.h>
#include <stdarg.h>
#include <string_utils.h>
#include <audit_logger.h>

#define SERVICE_TYPE "Northbound"

extern int makeDaemon(void);
extern void handler(int sig);

// Default category content for a north service instance
static const char *defaultServiceConfig = QUOTE({
	"enable": {
		"description": "A switch that can be used to enable or disable execution of the sending process.",
		"type": "boolean",
		"default": "true" ,
		"readonly": "true"
		},
	"streamId": {
		"description": "Identifies the specific stream to handle and the related information, among them the ID of the last object streamed.",
		"type": "integer",
		"default": "0",
		"readonly": "true"
		}
	});

using namespace std;

// The singleton service instance used by the C control callbacks below
static NorthService *service;

/**
 * Callback function when a plugin wishes to perform a write operation
 *
 * @param name		The name of the value to write
 * @param value		The value to write
 * @param destination	Where to write the value
 */
static bool controlWrite(char *name, char *value, ControlDestination destination, ...)
{
va_list ap;
bool	rval = false;

	switch (destination)
	{
		case DestinationAsset:
		case DestinationService:
		case DestinationScript:
		{
			// These destinations carry one extra varargs argument
			va_start(ap, destination);
			char *arg1 = va_arg(ap, char *);
			va_end(ap);
			rval = service->write(name, value, destination, arg1);
			break;
		}
		case DestinationBroadcast:
			rval = service->write(name, value, destination);
			break;
		default:
			Logger::getLogger()->error("Unknown control write destination %d", destination);
	}
	return rval;
}

/**
 * Callback function when a plugin wishes to perform a control operation
 *
 * @param operation	The name of the operation to perform
 * @param paramCount	The count of the number of parameters
 * @param names		The names of the parameters
 * @param parameters	The values of the parameters
 * @param destiantion	The destiantion for the operation
 */
static int controlOperation(char *operation, int paramCount, char *names[], char *parameters[], ControlDestination destination, ...)
{
va_list ap;
int	rval = -1;

	switch (destination)
	{
		case DestinationAsset:
		case DestinationService:
			// These destinations carry one extra varargs argument
			va_start(ap, destination);
			rval = service->operation(operation, paramCount, names, parameters, destination, va_arg(ap, char *));
			va_end(ap);
			break;
		case DestinationBroadcast:
			rval = service->operation(operation, paramCount, names, parameters, destination);
			break;
		default:
			Logger::getLogger()->error("Unknown control operation destination %d for operation %s", destination, operation);
	}
	return rval;
}

// Displays service information in JSON format
static void printServiceInfoAsJSON()
{
	static std::string serviceInfoJSON = R"({"name":"North Service","description":"Service To Egress Data","type":")" + std::string(SERVICE_TYPE) + R"(","process":"north_C","process_script":"[\"services/north_C\"]","startup_priority":200})";
	std::cout << serviceInfoJSON << std::endl;
}

/**
 * North service main entry point
 */
int main(int argc, char *argv[])
{
unsigned short	corePort = 8082;
string		coreAddress = "localhost";
bool		daemonMode = true;
string		myName = SERVICE_NAME;
string		logLevel = "warning";
string		token = "";
bool		dryRun = false;

	// Trap fatal signals so we can log a backtrace before dying
	signal(SIGSEGV, handler);
	signal(SIGILL, handler);
	signal(SIGBUS, handler);
	signal(SIGFPE, handler);
	signal(SIGABRT, handler);

	// Parse command line options
	for (int i = 1; i < argc; i++)
	{
		if (!strcmp(argv[i], "--info"))
		{
			printServiceInfoAsJSON();
			return 0;
		}
		if (!strcmp(argv[i], "-d"))
		{
			daemonMode = false;
		}
		else if (!strncmp(argv[i], "--port=", 7))
		{
			corePort = (unsigned short)strtol(&argv[i][7], NULL, 10);
		}
		else if (!strncmp(argv[i], "--name=", 7))
		{
			myName = &argv[i][7];
		}
		else if (!strncmp(argv[i], "--address=", 10))
		{
			coreAddress = &argv[i][10];
		}
		else if (!strncmp(argv[i], "--logLevel=", 11))
		{
			logLevel = &argv[i][11];
		}
		else if (!strncmp(argv[i], "--token=", 8))
		{
			token = &argv[i][8];
		}
		else if (!strncmp(argv[i], "--dryrun", 8))
		{
			dryRun = true;
		}
	}

#ifdef PROFILING
	// When built for profiling, write profile data under the data directory
	char profilePath[200]{0};
	if (getenv("FLEDGE_DATA"))
	{
		snprintf(profilePath, sizeof(profilePath), "%s/%s_Profile", getenv("FLEDGE_DATA"), myName.c_str());
	}
	else if (getenv("FLEDGE_ROOT"))
	{
		snprintf(profilePath, sizeof(profilePath), "%s/data/%s_Profile", getenv("FLEDGE_ROOT"), myName.c_str());
	}
	else
	{
		snprintf(profilePath, sizeof(profilePath), "/usr/local/fledge/data/%s_Profile", myName.c_str());
	}
	mkdir(profilePath, 0777);
	chdir(profilePath);
#endif

	if (daemonMode && makeDaemon() == -1)
	{
		// Failed to run in daemon mode
		cout << "Failed to run as deamon - proceeding in interactive mode." << endl;
	}

	service = new NorthService(myName, token);
	if (dryRun)
	{
		service->setDryRun();
	}
	Logger::getLogger()->setMinLevel(logLevel);
	service->start(coreAddress, corePort);
	delete service;
	return 0;
}

/**
 * Detach the process from the terminal and run in the background.
 */
int makeDaemon()
{
pid_t pid;

	/* Make the child process inherit the log level */
	int logmask = setlogmask(0);
	/* create new process */
	if ((pid = fork()  ) == -1)
	{
		return -1;
	}
	else if (pid != 0)
	{
		// Parent process exits, child carries on
		exit (EXIT_SUCCESS);
	}
	setlogmask(logmask);

	// If we got here we are a child process

	// create new session and process group
	if (setsid() == -1)
	{
		return -1;
	}

	// Close stdin, stdout and stderr
	close(0);
	close(1);
	close(2);
	// redirect fd's 0,1,2 to /dev/null
	open("/dev/null", O_RDWR);	// stdin
	if (dup(0) == -1) {}		// stdout	GCC bug 66425 produces warning
	if (dup(0) == -1) {}		// stderr	GCC bug 66425 produces warning
	return 0;
}

/**
 * Fatal signal handler; logs a demangled backtrace then exits.
 */
void handler(int sig)
{
Logger	*logger = Logger::getLogger();
void	*array[20];
char	buf[1024];
int	size;

	// get void*'s for all entries on the stack
	size = backtrace(array, 20);

	// print out all the frames to stderr
	logger->fatal("Signal %d (%s) trapped:\n", sig, strsignal(sig));
	char **messages = backtrace_symbols(array, size);
	for (int i = 0; i < size; i++)
	{
		Dl_info info;
		if (dladdr(array[i], &info) && info.dli_sname)
		{
			char *demangled = NULL;
			int status = -1;
			if (info.dli_sname[0] == '_')
				demangled = abi::__cxa_demangle(info.dli_sname, NULL, 0, &status);
			snprintf(buf, sizeof(buf),
					"%-3d %*p %s + %zd---------",
					i, int(2 + sizeof(void*) * 2), array[i],
					status == 0 ? demangled :
					info.dli_sname == 0 ? messages[i] : info.dli_sname,
					(char *)array[i] - (char *)info.dli_saddr);
			free(demangled);
		}
		else
		{
			snprintf(buf, sizeof(buf), "%-3d %*p %s---------",
					i, int(2 + sizeof(void*) * 2), array[i], messages[i]);
		}
		logger->fatal("(%d) %s", i, buf);
	}
	free(messages);
	exit(1);
}

/**
 * Constructor for the north service
 */
NorthService::NorthService(const string& myName, const string& token) :
	m_dataLoad(NULL),
	m_dataSender(NULL),
	northPlugin(NULL),
	m_assetTracker(NULL),
	m_shutdown(false),
	m_storage(NULL),
	m_pluginData(NULL),
	m_restartPlugin(false),
	m_token(token),
	m_allowControl(true),
	m_dryRun(false),
	m_requestRestart(),	// value-initialised to false
	m_auditLogger(NULL),
	m_perfMonitor(NULL),
	m_debugState(0),
	m_provider(NULL),
	m_allowDebugger(true)
{
	m_name = myName;
	logger = new Logger(myName);
	logger->setMinLevel("warning");
}

/**
 * Destructor for the north service
 */
NorthService::~NorthService()
{
	if (m_perfMonitor)
		delete m_perfMonitor;
	if (northPlugin)
		delete northPlugin;
	if (m_storage)
		delete m_storage;
	if (m_dataLoad)
		delete m_dataLoad;
	if (m_dataSender)
		delete m_dataSender;
	if (m_pluginData)
		delete m_pluginData;
	if (m_assetTracker)
		delete m_assetTracker;
	if (m_auditLogger)
		delete m_auditLogger;
	if (m_mgtClient)
		delete m_mgtClient;
	if (m_provider)
		delete m_provider;
	delete logger;
}

/**
 * Start the north service
 */
void NorthService::start(string& coreAddress, unsigned short corePort)
{
	unsigned short managementPort = (unsigned short)0;
	ManagementApi management(SERVICE_NAME, managementPort);	// Start managemenrt API
	logger->info("Starting north service...");
	NorthServiceProvider *provider = new NorthServiceProvider(this);
	management.registerProvider(provider);
	management.registerService(this);

	// Listen for incomming managment requests
	management.start();

	// Create the south API
	NorthApi *api = new NorthApi(this);
	if (!api)
	{
		logger->fatal("Unable to create API object");
		return;
} // Allow time for the listeners to start before we register sleep(1); if (! m_shutdown) { unsigned short sport = api->getListenerPort(); // Now register our service // TODO proper hostname lookup unsigned short managementListener = management.getListenerPort(); ServiceRecord record(m_name, // Service name SERVICE_TYPE, // Service type "http", // Protocol "localhost", // Listening address sport, // Service port managementListener, // Management port m_token); // Token); m_mgtClient = new ManagementClient(coreAddress, corePort); m_auditLogger = new AuditLogger(m_mgtClient); // Create an empty North category if one doesn't exist DefaultConfigCategory northConfig(string("North"), string("{}")); northConfig.setDescription("North"); m_mgtClient->addCategory(northConfig, true); // Fetch Configuration m_config = m_mgtClient->getCategory(m_name); if (!loadPlugin()) { logger->fatal("Failed to load north plugin, exiting..."); management.stop(); return; } if (!m_dryRun) { if (!m_mgtClient->registerService(record)) { logger->error("Failed to register service %s", m_name.c_str()); management.stop(); return; } ConfigCategory features = m_mgtClient->getCategory("FEATURES"); updateFeatures(features); ConfigHandler *configHandler = ConfigHandler::getInstance(m_mgtClient); configHandler->registerCategory(this, m_name); configHandler->registerCategory(this, m_name+"Advanced"); configHandler->registerCategory(this, "FEATURES"); } // Get a handle on the storage layer ServiceRecord storageRecord("Fledge Storage"); if (!m_mgtClient->getService(storageRecord)) { logger->fatal("Unable to find storage service"); if (!m_dryRun) { if (m_requestRestart) m_mgtClient->restartService(); else m_mgtClient->unregisterService(); } return; } logger->info("Connect to storage on %s:%d", storageRecord.getAddress().c_str(), storageRecord.getPort()); m_storage = new StorageClient(storageRecord.getAddress(), storageRecord.getPort()); m_storage->registerManagement(m_mgtClient); // Setup the performance 
monitor m_perfMonitor = new PerformanceMonitor(m_name, m_storage); if (m_configAdvanced.itemExists("perfmon")) { string perf = m_configAdvanced.getValue("perfmon"); if (perf.compare("true") == 0) m_perfMonitor->setCollecting(true); else m_perfMonitor->setCollecting(false); } logger->debug("Initialise the asset tracker"); m_assetTracker = new AssetTracker(m_mgtClient, m_name); AssetTracker::getAssetTracker()->populateAssetTrackingCache(m_name, "Egress"); // If the plugin supports control register the callback functions if (northPlugin->hasControl()) { northPlugin->pluginRegister(controlWrite, controlOperation); } // Deal with persisted data and start the plugin if (!m_dryRun) { if (northPlugin->persistData()) { logger->debug("Plugin %s requires persisted data", m_pluginName.c_str()); m_pluginData = new PluginData(m_storage); string key = m_name + m_pluginName; string storedData = m_pluginData->loadStoredData(key); logger->debug("Starting plugin with storedData: %s", storedData.c_str()); northPlugin->startData(storedData); } else { logger->debug("Start %s plugin", m_pluginName.c_str()); northPlugin->start(); } } // Create default security category this->createSecurityCategories(m_mgtClient, m_dryRun); // Setup the data loading long streamId = 0; if (m_config.itemExists("streamId")) { streamId = strtol(m_config.getValue("streamId").c_str(), NULL, 10); } logger->debug("Create threads for stream %d", streamId); m_dataLoad = new DataLoad(m_name, streamId, m_storage); m_dataLoad->setPerfMonitor(m_perfMonitor); if (m_config.itemExists("source")) { m_dataLoad->setDataSource(m_config.getValue("source")); } if (m_configAdvanced.itemExists("blockSize")) { unsigned long newBlock = strtoul( m_configAdvanced.getValue("blockSize").c_str(), NULL, 10); if (newBlock > 0) { m_dataLoad->setBlockSize(newBlock); } } if (m_configAdvanced.itemExists("streamUpdate")) { unsigned long newStreamUpdate = strtoul( m_configAdvanced.getValue("streamUpdate").c_str(), NULL, 10); if (newStreamUpdate 
> 0) { m_dataLoad->setStreamUpdate(newStreamUpdate); } } if (m_configAdvanced.itemExists("prefetchLimnit")) { unsigned long limit = strtoul( m_configAdvanced.getValue("prefetchLimit").c_str(), NULL, 10); if (limit > 0) { m_dataLoad->setPrefetchLimit(limit); } } if (m_configAdvanced.itemExists("assetTrackerInterval")) { unsigned long interval = strtoul( m_configAdvanced.getValue("assetTrackerInterval").c_str(), NULL, 10); if (m_assetTracker) m_assetTracker->tune(interval); } m_dataSender = new DataSender(northPlugin, m_dataLoad, this); m_dataSender->setPerfMonitor(m_perfMonitor); if (!m_dryRun) { logger->debug("North service is running"); // wait for shutdown unique_lock<mutex> lck(m_mutex); while (!m_shutdown) { m_cv.wait(lck); logger->debug("North main thread woken up, shutdown %s", m_shutdown ? "true" : "false"); if (m_shutdown == false && m_restartPlugin) { restartPlugin(); } } logger->debug("North service is shutting down"); } else { logger->info("Dryrun of service, shutting down"); } m_dataLoad->shutdown(); // Forces the data load to return from any blocking fetch call delete m_dataSender; m_dataSender = NULL; logger->debug("North service data sender has shut down"); delete m_dataLoad; m_dataLoad = NULL; logger->debug("North service shutting down plugin"); // Shutdown the north plugin if (northPlugin && !m_dryRun) { if (m_pluginData) { logger->debug("North service persist plugin data"); string saveData = northPlugin->shutdownSaveData(); string key = m_name + m_pluginName; logger->debug("Persist plugin data, key: '%s' data: '%s' service name: '%s'", key.c_str(), saveData.c_str(), m_name.c_str()); if (!m_pluginData->persistPluginData(key, saveData, m_name)) { Logger::getLogger()->error("Plugin %s has failed to save data [%s] for key %s", m_pluginName.c_str(), saveData.c_str(), key.c_str()); } } else { northPlugin->shutdown(); } } if (!m_dryRun) { if (m_requestRestart) { // Request core to restart this service m_mgtClient->restartService(); } else { // Clean 
shutdown, unregister the storage service logger->info("Unregistering service"); m_mgtClient->unregisterService(); } } } management.stop(); logger->info("North service %s shutdown completed", m_dryRun ? "dry run execution " : ""); } /** * Stop the storage service/ */ void NorthService::stop() { logger->info("Stopping north service...\n"); } /** * Creates config categories and sub categories recursively, along with their parent-child relations */ void NorthService::createConfigCategories(DefaultConfigCategory configCategory, std::string parent_name, std::string current_name) { // Deal with registering and fetching the configuration DefaultConfigCategory defConfig(configCategory); DefaultConfigCategory defConfigCategoryOnly(defConfig); defConfigCategoryOnly.keepItemsType(ConfigCategory::ItemType::CategoryType); defConfig.removeItemsType(ConfigCategory::ItemType::CategoryType); DefaultConfigCategory serviceConfig(current_name, defaultServiceConfig); defConfig += serviceConfig; defConfig.setDescription(current_name); // TODO We do not have access to the description // Create/Update category name (we pass keep_original_items=true) m_mgtClient->addCategory(defConfig, true); // Add this service under 'North' parent category vector<string> children; children.push_back(current_name); m_mgtClient->addChildCategories(parent_name, children); // Adds sub categories to the configuration bool extracted = true; ConfigCategory subCategory; while (extracted) { extracted = subCategory.extractSubcategory(defConfigCategoryOnly); if (extracted) { DefaultConfigCategory defSubCategory(subCategory); createConfigCategories(defSubCategory, current_name, subCategory.getName()); // Cleans the category subCategory.removeItems(); subCategory = ConfigCategory() ; } } } /** * Load the configured north plugin */ bool NorthService::loadPlugin() { try { PluginManager *manager = PluginManager::getInstance(); if (! 
m_config.itemExists("plugin"))
        {
            logger->error("Unable to fetch plugin name from configuration.\n");
            return false;
        }
        m_pluginName = m_config.getValue("plugin");
        logger->info("Loading north plugin %s.", m_pluginName.c_str());

        PLUGIN_HANDLE handle;
        if ((handle = manager->loadPlugin(m_pluginName, PLUGIN_TYPE_NORTH)) != NULL)
        {
            // Adds categories and sub categories to the configuration
            DefaultConfigCategory defConfig(m_name, manager->getInfo(handle)->config);
            createConfigCategories(defConfig, string("North"), m_name);

            // Must now reload the configuration to obtain any items added from
            // the plugin
            // Removes all the m_items already present in the category
            m_config.removeItems();
            m_config = m_mgtClient->getCategory(m_name);

            try
            {
                northPlugin = new NorthPlugin(handle, m_config);
            }
            catch (...)
            {
                // The plugin init entry point failed; report failure to caller
                return false;
            }

            // Deal with registering and fetching the advanced configuration
            string advancedCatName = m_name+string("Advanced");
            DefaultConfigCategory defConfigAdvanced(advancedCatName, string("{}"));
            addConfigDefaults(defConfigAdvanced);
            defConfigAdvanced.setDescription(m_name+string(" advanced config params"));

            // Create/Update category name (we pass keep_original_items=true)
            m_mgtClient->addCategory(defConfigAdvanced, true);

            // Add this service under 'm_name' parent category
            vector<string> children1;
            children1.push_back(advancedCatName);
            m_mgtClient->addChildCategories(m_name, children1);

            // Must now reload the merged configuration
            m_configAdvanced = m_mgtClient->getCategory(advancedCatName);

            if (m_configAdvanced.itemExists("logLevel"))
            {
                string prevLogLevel = logger->getMinLevel();
                logger->setMinLevel(m_configAdvanced.getValue("logLevel"));

                PluginManager *manager = PluginManager::getInstance();
                PLUGIN_TYPE type = manager->getPluginImplType(northPlugin->getHandle());
                logger->debug("%s:%d: North plugin type = %s", __FUNCTION__, __LINE__, (type==PYTHON_PLUGIN)?"PYTHON_PLUGIN":"BINARY_PLUGIN");
                if (m_dataLoad)
                {
                    logger->debug("%s:%d: calling m_dataLoad->configChange() for updating loglevel", __FUNCTION__, __LINE__);
                    m_dataLoad->configChange("north filters", "logLevel");
                }
                if (type == PYTHON_PLUGIN)
                {
                    // propagate loglevel changes to python filters/plugins, if present
                    logger->debug("prevLogLevel=%s, m_configAdvanced.getValue(\"logLevel\")=%s",
                            prevLogLevel.c_str(), m_configAdvanced.getValue("logLevel").c_str());
                    if (prevLogLevel.compare(m_configAdvanced.getValue("logLevel")) != 0)
                    {
                        logger->debug("calling northPlugin->reconfigure() for updating loglevel");
                        northPlugin->reconfigure("logLevel");
                    }
                }
            }
            if (m_configAdvanced.itemExists("control"))
            {
                string c = m_configAdvanced.getValue("control");
                if (c.compare("true") == 0)
                {
                    m_allowControl = true;
                    logger->warn("Control operations have been enabled");
                }
                else
                {
                    m_allowControl = false;
                    logger->warn("Control operations have been disabled");
                }
            }
            return true;
        }
    }
    catch (exception &e)
    {
        logger->fatal("Failed to load north plugin: %s\n", e.what());
    }
    return false;
}

/**
 * Shutdown request
 */
void NorthService::shutdown()
{
    /* Stop receiving new requests and allow existing
     * requests to drain.
*/
    m_shutdown = true;
    logger->info("North service shutdown in progress.");

    // Signal main thread to shutdown
    m_cv.notify_all();
}

/**
 * Restart request
 */
void NorthService::restart()
{
    logger->info("North service restart in progress.");

    // Set restart action
    m_requestRestart = true;
    // Set shutdown action
    m_shutdown = true;
    // Signal main thread to shutdown
    m_cv.notify_all();
}

/**
 * Configuration change notification
 *
 * @param categoryName The name of the category that has changed
 * @param category     The JSON content of the new category
 */
void NorthService::configChange(const string& categoryName, const string& category)
{
    logger->info("Configuration change in category %s: %s", categoryName.c_str(),
            category.c_str());
    if (categoryName.compare(m_name) == 0)
    {
        m_config = ConfigCategory(m_name, category);
        // North plugins have no reconfigure entry point; ask the main
        // thread to restart the plugin with the new configuration
        m_restartPlugin = true;
        m_cv.notify_all();
        if (m_dataLoad)
        {
            m_dataLoad->configChange(categoryName, category);
        }
        if (m_dataSender)
        {
            m_dataSender->configChange();
        }
    }
    if (categoryName.compare(m_name+"Advanced") == 0)
    {
        m_configAdvanced = ConfigCategory(m_name+"Advanced", category);
        if (m_configAdvanced.itemExists("logLevel"))
        {
            string prevLogLevel = logger->getMinLevel();
            logger->setMinLevel(m_configAdvanced.getValue("logLevel"));

            PluginManager *manager = PluginManager::getInstance();
            PLUGIN_TYPE type = manager->getPluginImplType(northPlugin->getHandle());
            logger->debug("%s:%d: North plugin type = %s", __FUNCTION__, __LINE__, (type==PYTHON_PLUGIN)?"PYTHON_PLUGIN":"BINARY_PLUGIN");
            if (m_dataLoad)
            {
                logger->debug("%s:%d: calling m_dataLoad->configChange() for updating loglevel", __FUNCTION__, __LINE__);
                m_dataLoad->configChange("north filters", "logLevel");
            }
            if (type == PYTHON_PLUGIN)
            {
                // propagate loglevel changes to python filters/plugins, if present
                logger->debug("prevLogLevel=%s, m_configAdvanced.getValue(\"logLevel\")=%s",
                        prevLogLevel.c_str(), m_configAdvanced.getValue("logLevel").c_str());
                if (prevLogLevel.compare(m_configAdvanced.getValue("logLevel")) != 0)
                {
                    logger->debug("%s:%d: calling northPlugin->reconfigure() for updating loglevel", __FUNCTION__, __LINE__);
                    northPlugin->reconfigure("logLevel");
                }
            }
        }
        if (m_configAdvanced.itemExists("control"))
        {
            string c = m_configAdvanced.getValue("control");
            if (c.compare("true") == 0)
            {
                m_allowControl = true;
                logger->warn("Control operations have been enabled");
            }
            else
            {
                m_allowControl = false;
                logger->warn("Control operations have been disabled");
            }
        }
        if (m_configAdvanced.itemExists("blockSize"))
        {
            unsigned long newBlock = strtoul(
                    m_configAdvanced.getValue("blockSize").c_str(),
                    NULL, 10);
            if (newBlock > 0)
            {
                m_dataLoad->setBlockSize(newBlock);
            }
        }
        if (m_configAdvanced.itemExists("streamUpdate"))
        {
            unsigned long newStreamUpdate = strtoul(
                    m_configAdvanced.getValue("streamUpdate").c_str(),
                    NULL, 10);
            if (newStreamUpdate > 0)
            {
                m_dataLoad->setStreamUpdate(newStreamUpdate);
            }
        }
        if (m_configAdvanced.itemExists("assetTrackerInterval"))
        {
            unsigned long interval = strtoul(
                    m_configAdvanced.getValue("assetTrackerInterval").c_str(),
                    NULL, 10);
            if (m_assetTracker)
                m_assetTracker->tune(interval);
        }
        if (m_configAdvanced.itemExists("perfmon"))
        {
            string perf = m_configAdvanced.getValue("perfmon");
            if (perf.compare("true") == 0)
                m_perfMonitor->setCollecting(true);
            else
                m_perfMonitor->setCollecting(false);
        }
    }
    // Update the Security category
    if (categoryName.compare(m_name+"Security") == 0)
    {
        this->updateSecurityCategory(category);
    }
    if (categoryName.compare("FEATURES") == 0)
    {
        ConfigCategory conf("FEATURES", category);
        this->updateFeatures(conf);
    }
}

/**
 * Restart the plugin with an updated configuration.
 * We need to do this as north plugins do not have a reconfigure method
 *
 * We need to make sure we are not sending data and the send data thread does not startup
 * whilst we are doing the restart.
 *
 * We also need to make sure the send data thread gets the new plugin.
*/ void NorthService::restartPlugin() { m_restartPlugin = false; // Stop the send data thread m_dataSender->pause(); if (m_pluginData) { string saveData = northPlugin->shutdownSaveData(); string key = m_name + m_pluginName; logger->debug("Persist plugin data, key: '%s' data: '%s' service name: '%s'", key.c_str(), saveData.c_str(), m_name.c_str()); if (!m_pluginData->persistPluginData(key, saveData, m_name)) { Logger::getLogger()->error("Plugin %s has failed to save data [%s] for key %s", m_pluginName.c_str(), saveData.c_str(), key.c_str()); } } else { northPlugin->shutdown(); } delete northPlugin; northPlugin = NULL; loadPlugin(); // Deal with persisted data and start the plugin if (northPlugin->persistData()) { logger->debug("Plugin %s requires persisted data", m_pluginName.c_str()); m_pluginData = new PluginData(m_storage); string key = m_name + m_pluginName; string storedData = m_pluginData->loadStoredData(key); logger->debug("Starting plugin with storedData: %s", storedData.c_str()); northPlugin->startData(storedData); } else { logger->debug("Start %s plugin", m_pluginName.c_str()); northPlugin->start(); } m_dataSender->updatePlugin(northPlugin); m_dataSender->release(); // If the plugin supports control register the callback functions if (northPlugin->hasControl() && m_allowControl) { northPlugin->pluginRegister(controlWrite, controlOperation); } } /** * Add the generic north service configuration options to the advanced * category * * @param defaultConfiguration The default configuration from the plugin */ void NorthService::addConfigDefaults(DefaultConfigCategory& defaultConfig) { for (int i = 0; defaults[i].name; i++) { defaultConfig.addItem(defaults[i].name, defaults[i].description, defaults[i].type, defaults[i].value, defaults[i].value); defaultConfig.setItemDisplayName(defaults[i].name, defaults[i].displayName); } if (northPlugin->hasControl()) { defaultConfig.addItem("control", "Allow write and control operations from the upstream system", "boolean", 
"true", "true"); defaultConfig.setItemDisplayName("control", "Allow Control"); } /* Add the set of logging levels to the service */ vector<string> logLevels = { "error", "warning", "info", "debug" }; defaultConfig.addItem("logLevel", "Minimum logging level reported", "warning", "warning", logLevels); defaultConfig.setItemDisplayName("logLevel", "Minimum Log Level"); // Add blockSize configuration item defaultConfig.addItem("blockSize", "The size of a block of data to send in each transmission.", "integer", std::to_string(DEFAULT_BLOCK_SIZE), std::to_string(DEFAULT_BLOCK_SIZE)); defaultConfig.setItemDisplayName("blockSize", "Data block size"); // Add streams update configuration item defaultConfig.addItem("streamUpdate", "Set the number of blocks to be sent before updating the stream location in the storage layer.", "integer", std::to_string(1), std::to_string(1)); defaultConfig.setItemDisplayName("streamUpdate", "Stream update frequency"); defaultConfig.setItemAttribute("streamUpdate", ConfigCategory::MINIMUM_ATTR, "1"); // Add prefetch limit item defaultConfig.addItem("prefetchLimit", "The maximum number of blocks to be prefetched and queued ready for transmission.", "integer", std::to_string(2), std::to_string(2)); defaultConfig.setItemDisplayName("prefetchLimit", "Data block prefetch"); defaultConfig.setItemAttribute("prefetchLimit", ConfigCategory::MINIMUM_ATTR, "2"); defaultConfig.setItemAttribute("prefetchLimit", ConfigCategory::MAXIMUM_ATTR, "10"); defaultConfig.addItem("assetTrackerInterval", "Number of milliseconds between updates of the asset tracker information", "integer", std::to_string(MIN_ASSET_TRACKER_UPDATE), std::to_string(MIN_ASSET_TRACKER_UPDATE)); defaultConfig.setItemDisplayName("assetTrackerInterval", "Asset Tracker Update"); defaultConfig.addItem("perfmon", "Track and store performance counters", "boolean", "false", "false"); defaultConfig.setItemDisplayName("perfmon", "Performance Counters"); } /** * Control write operation * * @param name 
Name of the variable to write * @param value Value to write to the variable * @param destination Where to write the value * @return true if write was succesfully sent to dispatcher, else false */ bool NorthService::write(const string& name, const string& value, const ControlDestination destination) { Logger::getLogger()->info("Control write %s with %s", name.c_str(), value.c_str()); if (destination != DestinationBroadcast) { Logger::getLogger()->error("Write destination requires an argument that is not given"); return -1; } // Build payload for dispatcher service string payload = "{ \"destination\" : \"broadcast\","; payload += controlSource(); payload += ", \"write\" : { \""; payload += name; payload += "\" : \""; string escaped = value; StringEscapeQuotes(escaped); payload += escaped; payload += "\" } }"; return sendToDispatcher("/dispatch/write", payload); } /** * Control write operation * * @param name Name of the variable to write * @param value Value to write to the variable * @param destination Where to write the value * @param arg Argument used to determine destination * @return true if write was succesfully sent to dispatcher, else false */ bool NorthService::write(const string& name, const string& value, const ControlDestination destination, const string& arg) { Logger::getLogger()->info("Control write %s with %s", name.c_str(), value.c_str()); // Build payload for dispatcher service string payload = "{ \"destination\" : \""; switch (destination) { case DestinationService: payload += "service\", \"name\" : \""; payload += arg; payload += "\""; break; case DestinationAsset: payload += "asset\", \"name\" : \""; payload += arg; payload += "\""; break; case DestinationScript: payload += "script\", \"name\" : \""; payload += arg; payload += "\""; break; case DestinationBroadcast: payload += "broadcast\""; break; } payload += ", "; payload += controlSource(); payload += ", \"write\" : { \""; payload += name; payload += "\" : \""; string escaped = value; 
StringEscapeQuotes(escaped); payload += escaped; payload += "\" } }"; return sendToDispatcher("/dispatch/write", payload); } /** * Control operation * * @param name Name of the operation to perform * @param paramCount The number of parameters * @param parameters The parameters to the operation * @param destination Where to write the value * @return -1 in case of error on operation destination, 1 if operation was succesfully sent to dispatcher, else 0 */ int NorthService::operation(const string& name, int paramCount, char *names[], char *parameters[], const ControlDestination destination) { Logger::getLogger()->info("Control operation %s with %d parameters", name.c_str(), paramCount); for (int i = 0; i < paramCount; i++) Logger::getLogger()->info("Parameter %d: %s", i, parameters[i]); if (destination != DestinationBroadcast) { Logger::getLogger()->error("Operation destination requires an argument that is not given"); return -1; } // Build payload for dispatcher service string payload = "{ \"destination\" : \"broadcast\","; payload += controlSource(); payload += ", \"operation\" : { \""; payload += name; payload += "\" : { "; for (int i = 0; i < paramCount; i++) { payload += "\""; payload += names[i]; payload += "\": \""; string escaped = parameters[i]; StringEscapeQuotes(escaped); payload += escaped; payload += "\""; if (i < paramCount -1) payload += ","; } payload += " } } }"; return static_cast<int>(sendToDispatcher("/dispatch/operation", payload)); } /** * Control write operation * * @param name Name of the operation to perform * @param paramCount The number of parameters * @param parameters The parameters to the operation * @param destination Where to write the value * @param arg Argument used to determine destination * @return 1 if operation was succesfully sent to dispatcher, else 0 */ int NorthService::operation(const string& name, int paramCount, char *names[], char *parameters[], const ControlDestination destination, const string& arg) { 
Logger::getLogger()->info("Control operation %s with %d parameters", name.c_str(), paramCount); for (int i = 0; i < paramCount; i++) Logger::getLogger()->info("Parameter %d: %s", i, parameters[i]); // Build payload for dispatcher service string payload = "{ \"destination\" : \""; switch (destination) { case DestinationService: payload += "service\", \"name\" : \""; payload += arg; payload += "\""; break; case DestinationAsset: payload += "asset\", \"name\" : \""; payload += arg; payload += "\""; break; case DestinationScript: payload += "script\", \"name\" : \""; payload += arg; payload += "\""; break; case DestinationBroadcast: payload += "broadcast\""; break; } payload += ", "; payload += controlSource(); payload += ", \"operation\" : { \""; payload += name; payload += "\" : { "; for (int i = 0; i < paramCount; i++) { payload += "\""; payload += names[i]; payload += "\": \""; string escaped = parameters[i]; StringEscapeQuotes(escaped); payload += escaped; payload += "\""; if (i < paramCount -1) payload += ","; } payload += "} } }"; return static_cast<int>(sendToDispatcher("/dispatch/operation", payload)); } /** * Send to a south service direct. This is temporary until we have the * service dispatcher in place. 
*/
bool NorthService::sendToService(const string& southService, const string& name, const string& value)
{
    // Build the JSON payload understood by the south service setpoint API
    std::string payload = "{ \"values\" : { \"";
    payload += name;
    payload += "\": \"";
    payload += value;
    payload += "\"} }";

    // Send the control message to the south service
    try
    {
        ServiceRecord service(southService);
        if (!m_mgtClient->getService(service))
        {
            Logger::getLogger()->error("Unable to find service '%s'", southService.c_str());
            return false;
        }
        string address = service.getAddress();
        unsigned short port = service.getPort();
        char addressAndPort[80];
        snprintf(addressAndPort, sizeof(addressAndPort), "%s:%d", address.c_str(), port);
        SimpleWeb::Client<SimpleWeb::HTTP> http(addressAndPort);

        string url = "/fledge/south/setpoint";
        try
        {
            SimpleWeb::CaseInsensitiveMultimap headers = {{"Content-Type", "application/json"}};
            auto res = http.request("PUT", url, payload, headers);
            // Note: compare() != 0 means any status other than 200 OK is a failure
            if (res->status_code.compare("200 OK"))
            {
                Logger::getLogger()->error("Failed to send set point operation to service %s, %s",
                        southService.c_str(), res->status_code.c_str());
                Logger::getLogger()->error("Failed Payload: %s", payload.c_str());
                return false;
            }
        }
        catch (exception& e)
        {
            Logger::getLogger()->error("Failed to send set point operation to service %s, %s",
                    southService.c_str(), e.what());
            return false;
        }
        return true;
    }
    catch (exception &e)
    {
        Logger::getLogger()->error("Failed to send set point operation to service %s, %s",
                southService.c_str(), e.what());
        return false;
    }
}

/**
 * Send to the control dispatcher service
 *
 * @param path    The dispatcher API entry point to call
 * @param payload The JSON payload to send
 * @return bool   True if the dispatcher accepted the request
 */
bool NorthService::sendToDispatcher(const string& path, const string& payload)
{
    Logger::getLogger()->debug("Dispatch %s with %s", path.c_str(), payload.c_str());
    // Send the control message to the dispatcher service
    try
    {
        ServiceRecord service("dispatcher");
        if (!m_mgtClient->getService(service))
        {
            Logger::getLogger()->error("Unable to find dispatcher service 'Dispatcher'");
            return false;
        }
        string address = service.getAddress();
        unsigned short port = service.getPort();
        char addressAndPort[80];
        snprintf(addressAndPort, sizeof(addressAndPort), "%s:%d", address.c_str(), port);
        SimpleWeb::Client<SimpleWeb::HTTP> http(addressAndPort);

        try
        {
            SimpleWeb::CaseInsensitiveMultimap headers = {{"Content-Type", "application/json"}};
            // Pass North service bearer token to dispatcher
            string regToken = m_mgtClient->getRegistrationBearerToken();
            if (regToken != "")
            {
                headers.emplace("Authorization", "Bearer " + regToken);
            }

            auto res = http.request("POST", path, payload, headers);
            // The dispatcher queues the request; anything other than
            // 202 Accepted is treated as a failure
            if (res->status_code.compare("202 Accepted"))
            {
                Logger::getLogger()->error(
                        "Failed to send control operation '%s' to dispatcher service, %s %s",
                        path.c_str(), res->status_code.c_str(), res->content.string().c_str());
                Logger::getLogger()->error("Failed Payload: %s", payload.c_str());
                return false;
            }
        }
        catch (exception& e)
        {
            Logger::getLogger()->error("Failed to send control operation to dispatcher service, %s",
                    e.what());
            return false;
        }
        return true;
    }
    catch (exception &e)
    {
        Logger::getLogger()->error("Failed to send control operation to dispatcher service, %s",
                e.what());
        return false;
    }
}

/**
 * Return the control source for control operations. This is used
 * for pipeline matching.
 *
 * @return string The control source
 */
string NorthService::controlSource()
{
    string source = "\"source\" : \"service\", \"source_name\" : \"";
    source += m_name;
    source += "\"";
    return source;
}

/**
 * Raise an alert that we are having issues sending data
 *
 * We also write a warning to the system log to aid with debugging
 */
void NorthService::alertFailures()
{
    string key = "North " + m_name;
    string message = "Repeated failures to send data via the " + m_name + " north service ";
    m_mgtClient->raiseAlert(key, message, "normal");
    logger->warn("Repeated failures to send data to destination");
}

/**
 * Clear the failure alert for sending data
 *
 * We clear the alert from the status bar and write a message to the system
 * log
 */
void NorthService::clearFailures()
{
    string key = "North " + m_name;
    m_mgtClient->clearAlert(key);
    logger->info("The sending of data has resumed");
}

/**
 * Return the state of the pipeline debugger
 *
 * @return string JSON document reporting the state of the pipeline debugger
 */
string NorthService::debugState()
{
    string rval;

    rval = "{ ";
    rval += "\"debugger\" : ";
    if (m_debugState & DEBUG_ATTACHED)
    {
        rval += "\"Attached\",";
        rval += "\"ingress\" : ";
        if (m_debugState & DEBUG_SUSPENDED)
            rval += "\"Suspended\", ";
        else
            rval += "\"Running\", ";
        rval += "\"egress\" : ";
        if (m_debugState & DEBUG_ISOLATED)
            rval += "\"Isolated\"";
        else
            rval += "\"Storage\"";
    }
    else if (m_allowDebugger)
    {
        rval += "\"Detached\"";
    }
    else
    {
        rval += "\"Disabled\"";
    }
    rval += "}";
    return rval;
}

/**
 * Process the setting of allowed features
 *
 * @param category The configuration category
 */
void NorthService::updateFeatures(const ConfigCategory& category)
{
    if (category.itemExists("debugging"))
    {
        string s = category.getValue("debugging");
        m_allowDebugger = s.compare("true") == 0 ?
true : false;
        if ((m_debugState & DEBUG_ATTACHED) != 0 && m_allowDebugger == false)
        {
            // Detach the debugger in case there is an active session
            detachDebugger();
        }
    }
}

================================================
FILE: C/services/north/north_api.cpp
================================================
/**
 * Fledge north service API
 *
 * Copyright (c) 2025 Dianomic Systems
 *
 * Author: Mark Riddoch
 */
#include <north_api.h>
#include <north_service.h>
#include <rapidjson/document.h>

using namespace std;
using namespace rapidjson;
using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>;

// Singleton used by the static wrapper functions below to route HTTP
// requests to the NorthApi instance
static NorthApi *api = NULL;

/**
 * Wrapper for the PUT attach debugger API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void attachDebuggerWrapper(Response response, Request request)
{
    if (api)
        api->attachDebugger(response, request);
}

/**
 * Wrapper for the PUT detach debugger API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void detachDebuggerWrapper(Response response, Request request)
{
    if (api)
        api->detachDebugger(response, request);
}

/**
 * Wrapper for the PUT set debugger buffer size API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void setDebuggerBufferWrapper(Response response, Request request)
{
    if (api)
        api->setDebuggerBuffer(response, request);
}

/**
 * Wrapper for the GET debugger buffer API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void getDebuggerBufferWrapper(Response response, Request request)
{
    if (api)
        api->getDebuggerBuffer(response, request);
}

/**
 * Wrapper for the PUT debugger isolate API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void isolateDebuggerWrapper(Response response, Request request)
{
    if (api)
        api->isolateDebugger(response, request);
}

/**
 * Wrapper for the PUT debugger suspend API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void suspendDebuggerWrapper(Response response, Request request)
{
    if (api)
        api->suspendDebugger(response, request);
}

/**
 * Wrapper for the PUT step debugger API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void stepDebuggerWrapper(Response response, Request request)
{
    if (api)
        api->stepDebugger(response, request);
}

/**
 * Wrapper for the PUT replay debugger API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void replayDebuggerWrapper(Response response, Request request)
{
    if (api)
        api->replayDebugger(response, request);
}

/**
 * Wrapper for the GET state debugger API call
 *
 * @param response The HTTP Response to send
 * @param request  The HTTP Request
 */
static void stateDebuggerWrapper(Response response, Request request)
{
    if (api)
        api->stateDebugger(response, request);
}

/**
 * Wrapper for thread creation that is used to start the API
 */
static void startService()
{
    api->startServer();
}

/**
 * North API class constructor
 *
 * @param service The NorthService class this is the API for
 */
NorthApi::NorthApi(NorthService *service) : m_service(service), m_thread(NULL)
{
    m_logger = Logger::getLogger();
    m_server = new HttpServer();
    // Port 0: let the OS choose a free port, retrieved via getListenerPort()
    m_server->config.port = 0;
    m_server->config.thread_pool_size = 1;

    // Add the debugger entry points
    m_server->resource[DEBUG_ATTACH]["PUT"] = attachDebuggerWrapper;
    m_server->resource[DEBUG_DETACH]["PUT"] = detachDebuggerWrapper;
    m_server->resource[DEBUG_BUFFER]["POST"] = setDebuggerBufferWrapper;
    m_server->resource[DEBUG_BUFFER]["GET"] = getDebuggerBufferWrapper;
    m_server->resource[DEBUG_ISOLATE]["PUT"] = isolateDebuggerWrapper;
    m_server->resource[DEBUG_SUSPEND]["PUT"] = suspendDebuggerWrapper;
    m_server->resource[DEBUG_STEP]["PUT"] = stepDebuggerWrapper;
    m_server->resource[DEBUG_REPLAY]["PUT"] = replayDebuggerWrapper;
    m_server->resource[DEBUG_STATE]["GET"] = stateDebuggerWrapper;
api = this;
    m_thread = new thread(startService);
}

/**
 * Destroy the API.
 *
 * Stop the service and wait for the thread to terminate.
 */
NorthApi::~NorthApi()
{
    if (m_thread)
    {
        m_server->stop();
        m_thread->join();
        delete m_thread;
    }
    if (m_server)
        delete m_server;
}

/**
 * Called on the API service thread. Start the listener for HTTP requests
 */
void NorthApi::startServer()
{
    m_server->start();
}

/**
 * Return the port the service is listening on
 */
unsigned short NorthApi::getListenerPort()
{
int max_wait = 10;

    // Need to make sure the server thread has started; poll until the
    // OS-assigned port is known or the retries are exhausted
    while (m_server->getLocalPort() == 0 && max_wait-- > 0)
        usleep(100);
    return m_server->getLocalPort();
}

/**
 * Invoke debugger attach on the north plugin
 *
 * @param response The HTTP response
 * @param request  The HTTP request - unused
 */
void NorthApi::attachDebugger(Response response, Request /*request*/)
{
    if (m_service->allowDebugger())
    {
        bool status = m_service->attachDebugger();
        if (status)
        {
            string responsePayload = QUOTE({ "status" : "ok" });
            m_service->respond(response, responsePayload);
        }
        else
        {
            string responsePayload = QUOTE({ "status" : "Failed to attach the debugger to the pipeline" });
            m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
        }
    }
    else
    {
        string responsePayload = QUOTE({ "status" : "Pipeline debugger features have been disabled" });
        m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
    }
}

/**
 * Invoke debugger detach on the north plugin
 *
 * @param response The HTTP response
 * @param request  The HTTP request - unused
 */
void NorthApi::detachDebugger(Response response, Request /*request*/)
{
    if (m_service->allowDebugger())
    {
        string responsePayload;
        if (m_service->debuggerAttached())
        {
            m_service->detachDebugger();
            responsePayload = QUOTE({ "status" : "ok" });
        }
        else
        {
            responsePayload = QUOTE({"status" : "Debugger is not attached to the service" });
        }
        m_service->respond(response, responsePayload);
    }
    else
    {
        string responsePayload = QUOTE({ "status" : "Pipeline debugger features have been disabled" });
        m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
    }
}

/**
 * Invoke set debugger buffer size on the north plugin
 *
 * @param response The HTTP response
 * @param request  The HTTP request
 */
void NorthApi::setDebuggerBuffer(Response response, Request request)
{
    if (m_service->allowDebugger())
    {
        if (m_service->debuggerAttached())
        {
            string payload = request->content.string();
            Document doc;
            ParseResult result = doc.Parse(payload.c_str());
            if (result)
            {
                if (doc.HasMember("size"))
                {
                    if (doc["size"].IsUint())
                    {
                        unsigned int size = doc["size"].GetUint();
                        m_service->setDebuggerBuffer(size);
                        string responsePayload = QUOTE({ "status" : "ok" });
                        m_service->respond(response, responsePayload);
                    }
                    else
                    {
                        string responsePayload = QUOTE({ "message" : "The value of 'size' should be an unsigned integer" });
                        m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
                    }
                }
                else
                {
                    string responsePayload = QUOTE({ "message" : "Missing 'size' item in payload" });
                    m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
                }
            }
            else
            {
                string responsePayload = QUOTE({ "message" : "Failed to parse request payload" });
                m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
            }
        }
        else
        {
            string responsePayload = QUOTE({"status" : "Debugger is not attached to the service" });
            m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
        }
    }
    else
    {
        string responsePayload = QUOTE({ "status" : "Pipeline debugger features have been disabled" });
        m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
    }
}

/**
 * Invoke get debugger buffer size on the north plugin
 *
 * @param response The HTTP response
 * @param request  The HTTP request - unused
 */
void NorthApi::getDebuggerBuffer(Response response, Request /*request*/)
{
    if (m_service->allowDebugger())
    {
        string result;
        if (m_service->debuggerAttached())
        {
            result = m_service->getDebuggerBuffer();
        }
        else
        {
            result = QUOTE({"status" : "Debugger is not attached to the service" });
        }
        m_service->respond(response, result);
    }
    else
    {
        string responsePayload = QUOTE({ "status" : "Pipeline debugger features have been disabled" });
        m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
    }
}

/**
 * Invoke isolate debugger handler on the north plugin
 *
 * @param response The HTTP response
 * @param request  The HTTP request
 */
void NorthApi::isolateDebugger(Response response, Request request)
{
    if (m_service->allowDebugger())
    {
        if (m_service->debuggerAttached())
        {
            string payload = request->content.string();
            Document doc;
            ParseResult result = doc.Parse(payload.c_str());
            if (result)
            {
                if (doc.HasMember("state"))
                {
                    if (doc["state"].IsString())
                    {
                        string state = doc["state"].GetString();
                        if (state.compare("discard") == 0)
                            m_service->isolateDebugger(true);
                        else if (state.compare("store") == 0)
                            m_service->isolateDebugger(false);
                        else
                        {
                            string responsePayload = QUOTE({ "message" : "The value of 'state' should be one of 'discard' or 'store'" });
                            m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload);
                            return;
                        }
                        string responsePayload = QUOTE({ "status" : "ok" });
                        m_service->respond(response, responsePayload);
                    }
                    else
                    {
                        string responsePayload = QUOTE({ "message" : "The value of 'size' should be a string with either 'discard' or 'store'."
}); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Missing 'state' item in payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Failed to parse request payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Pipeline debugger features have been disabled" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } /** * Invoke suspend debugger handler on the north plugin * * @param response The HTTP response * @param request The HTTP request */ void NorthApi::suspendDebugger(Response response, Request request) { if (m_service->allowDebugger()) { if (m_service->debuggerAttached()) { string payload = request->content.string(); Document doc; ParseResult result = doc.Parse(payload.c_str()); if (result) { if (doc.HasMember("state")) { if (doc["state"].IsString()) { string state = doc["state"].GetString(); if (state.compare("suspend") == 0) m_service->suspendDebugger(true); else if (state.compare("resume") == 0) m_service->suspendDebugger(false); else { string responsePayload = QUOTE({ "message" : "The value of 'state' should be one of 'suspend' or 'resume'" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); return; } string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "message" : "The value of 'size' should be a string with either 'suspend' or 'resume'." 
}); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Missing 'state' item in payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Failed to parse request payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Pipeline debugger features have been disabled" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } /** * Invoke set debugger step command on the north plugin * * @param response The HTTP response * @param request The HTTP request */ void NorthApi::stepDebugger(Response response, Request request) { if (m_service->allowDebugger()) { if (m_service->debuggerAttached()) { string payload = request->content.string(); Document doc; ParseResult result = doc.Parse(payload.c_str()); if (result) { if (doc.HasMember("steps")) { if (doc["steps"].IsUint()) { unsigned int steps = doc["steps"].GetUint(); m_service->stepDebugger(steps); string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "message" : "The value of 'steps' should be an unsigned integer" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Missing 'steps' item in payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Failed to parse request payload" }); 
m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Pipeline debugger features have been disabled" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } /** * Invoke debugger replay on the north plugin * * @param response The HTTP response * @param request The HTTP request - unused */ void NorthApi::replayDebugger(Response response, Request /*request*/) { if (m_service->allowDebugger()) { string responsePayload; if (m_service->debuggerAttached()) { // TODO Handle pre-requisites if (m_service->replayDebugger()) { responsePayload = QUOTE({ "status" : "ok" }); } else { responsePayload = QUOTE({ "status" : "No data to replay" }); } } else { responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); } m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "status" : "Pipeline debugger features have been disabled" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } /** * Invoke debugger state on the north plugin * * @param response The HTTP response * @param request The HTTP request - unused */ void NorthApi::stateDebugger(Response response, Request /*request*/) { string payload = m_service->debugState(); m_service->respond(response, payload); } ================================================ FILE: C/services/north/north_plugin.cpp ================================================ /* * Fledge north service. 
* * Copyright (c) 2020 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <north_plugin.h> #include <config_category.h> #include <logger.h> #include <exception> #include <typeinfo> #include <stdexcept> #include <mutex> using namespace std; // mutex between various plugin methods, since reconfigure changes the handle // object itself and marks previous handle as garbage collectible by Python runtime std::mutex mtx2; /** * Constructor for the class that wraps the north plugin * * Create a set of function points that resolve to the loaded plugin and * enclose in the class. * */ NorthPlugin::NorthPlugin(PLUGIN_HANDLE handle, const ConfigCategory& category) : Plugin(handle) { // Call the init method of the plugin PLUGIN_HANDLE (*pluginInit)(const void *) = (PLUGIN_HANDLE (*)(const void *)) manager->resolveSymbol(handle, "plugin_init"); m_instance = (*pluginInit)(&category); if (!m_instance) { Logger::getLogger()->error("plugin_init returned NULL, cannot proceed"); throw new exception(); } // Setup the function pointers to the plugin pluginSendPtr = (uint32_t (*)(PLUGIN_HANDLE, const std::vector<Reading *>& readings)) manager->resolveSymbol(handle, "plugin_send"); pluginReconfigurePtr = (void (*)(PLUGIN_HANDLE*, const std::string&)) manager->resolveSymbol(handle, "plugin_reconfigure"); pluginShutdownPtr = (void (*)(PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_shutdown"); pluginShutdownDataPtr = (string (*)(const PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_shutdown"); pluginStartPtr = (void (*)(const PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_start"); pluginStartDataPtr = (void (*)(const PLUGIN_HANDLE, const string& storedData)) manager->resolveSymbol(handle, "plugin_start"); if (hasControl()) { pluginRegisterPtr = (void (*)(const PLUGIN_HANDLE handle, bool ( *write)(char *name, char *value, ControlDestination destination, ...), int (* operation)(char *operation, int paramCount, char *names[], 
char *parameters[], ControlDestination destination, ...)))manager->resolveSymbol(handle, "plugin_register"); } else { pluginRegisterPtr = NULL; } } NorthPlugin::~NorthPlugin() { } /** * Call the start method in the plugin * with no persisted data */ void NorthPlugin::start() { // Check pluginStart function pointer exists if (this->pluginStartPtr) { this->pluginStartPtr(m_instance); } } /** * Call the start method in the plugin * passing persisted data */ void NorthPlugin::startData(const string& storedData) { // Ccheck pluginStartData function pointer exists if (this->pluginStartDataPtr) { this->pluginStartDataPtr(m_instance, storedData); } } /** * Call the send method in the plugin */ uint32_t NorthPlugin::send(const vector<Reading *>& readings) { lock_guard<mutex> guard(mtx2); try { return this->pluginSendPtr(m_instance, readings); } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in north plugin send(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in north plugin send(), %s", p ? p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the reconfigure method in the plugin */ void NorthPlugin::reconfigure(const string& newConfig) { if (!pluginReconfigurePtr) { /* * The plugin does not support reconfiguration, shutdown * and restart the plugin. 
*/ lock_guard<mutex> guard(mtx2); if (persistData()) { } else { (*pluginShutdownPtr)(m_instance); PLUGIN_HANDLE (*pluginInit)(const void *) = (PLUGIN_HANDLE (*)(const void *)) manager->resolveSymbol(handle, "plugin_init"); ConfigCategory category("new", newConfig); m_instance = (*pluginInit)(&category); } return; } lock_guard<mutex> guard(mtx2); try { this->pluginReconfigurePtr(&m_instance, newConfig); if (!m_instance) { Logger::getLogger()->error("plugin_reconfigure returned NULL, cannot proceed"); throw new exception(); } return; } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in north plugin reconfigure(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in north plugin reconfigure(), %s", p ? p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the shutdown method in the plugin */ void NorthPlugin::shutdown() { if (this->pluginShutdownPtr) { try { return this->pluginShutdownPtr(m_instance); } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in north plugin shutdown(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in north plugin shutdown(), %s", p ? 
p.__cxa_exception_type()->name() : "unknown exception"); throw; } } } /** * Call the shutdown method in the plugin * and return plugin data to parsist as JSON string */ string NorthPlugin::shutdownSaveData() { string ret(""); // Check pluginShutdownData function pointer exists if (this->pluginShutdownDataPtr) { ret = this->pluginShutdownDataPtr(m_instance); } return ret; } /** * Call the plugin_register entry point of the plugin if one has been defined */ void NorthPlugin::pluginRegister(bool ( *write)(char *name, char *value, ControlDestination destination, ...), int (* operation)(char *operation, int paramCount, char *names[], char *parameters[], ControlDestination destination, ...)) { if (hasControl() && pluginRegisterPtr) { (*pluginRegisterPtr)(m_instance, write, operation); } } ================================================ FILE: C/services/north-plugin-interfaces/python/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(north-plugin-python-interface) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(DLLIB -ldl) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) # Find source files file(GLOB SOURCES python_plugin_interface.cpp) # Find Python.h 3.x dev/lib package if(${CMAKE_VERSION} VERSION_LESS "3.12.0") pkg_check_modules(PYTHON REQUIRED python3) else() find_package(Python3 REQUIRED COMPONENTS Interpreter Development) endif() # Include header files include_directories(include ../../../common/include ../../../services/common/include ../../../services/north/include ../../../thirdparty/rapidjson/include) include_directories(../../../services/common-plugin-interfaces/python/include) # Add Python 3.x header files if(${CMAKE_VERSION} VERSION_LESS "3.12.0") include_directories(${PYTHON_INCLUDE_DIRS}) else() include_directories(${Python3_INCLUDE_DIRS}) endif() if(${CMAKE_VERSION} VERSION_LESS "3.12.0") link_directories(${PYTHON_LIBRARY_DIRS}) 
else() link_directories(${Python3_LIBRARY_DIRS}) endif() set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) target_link_libraries(${PROJECT_NAME} ${DLLIB}) target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${SERVICE_COMMON_LIB}) # Add Python 3.x library if(${CMAKE_VERSION} VERSION_LESS "3.12.0") target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES}) else() target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES}) endif() set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib) ================================================ FILE: C/services/north-plugin-interfaces/python/python_plugin_interface.cpp ================================================ /* * Fledge North plugin interface related * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <logger.h> #include <config_category.h> #include <reading.h> #include <pythonreadingset.h> #include <mutex> #include <north_plugin.h> #include <pyruntime.h> #include <Python.h> #include <python_plugin_common_interface.h> #define SHIM_SCRIPT_NAME "north_shim" using namespace std; extern "C" { extern PLUGIN_INFORMATION *plugin_info_fn(); extern PLUGIN_HANDLE plugin_init_fn(ConfigCategory *); extern void plugin_reconfigure_fn(PLUGIN_HANDLE*, const std::string&); extern void plugin_shutdown_fn(PLUGIN_HANDLE); extern void logErrorMessage(); extern PLUGIN_INFORMATION *Py2C_PluginInfo(PyObject *); // North plugin entry points void plugin_start_fn(PLUGIN_HANDLE handle); uint32_t plugin_send_fn(PLUGIN_HANDLE handle, const std::vector<Reading *>& readings); /** * Function to invoke async 'plugin_send' function in python plugin * * @param plugin_send_module_func Reference to plugin's plugin_send async method * @param handle Plugin handle from 
plugin_init_fn * @param readingsList Reading list to send */ unsigned int call_plugin_send_coroutine(PyObject *plugin_send_module_func, PLUGIN_HANDLE handle, PyObject *readingsList) { unsigned int numSent=0; std::string fcn = ""; fcn += "def plugin_send_wrapper(handle, readings, plugin_send_module_func):\n"; fcn += " import asyncio\n"; fcn += " loop = asyncio.new_event_loop()\n"; fcn += " asyncio.set_event_loop(loop)\n"; fcn += " coroObj = plugin_send_module_func(handle, readings, \"000001\")\n"; fcn += " task = loop.create_task(coroObj)\n"; fcn += " done, _ = loop.run_until_complete(asyncio.wait([task]))\n"; fcn += " numSent = 0\n"; fcn += " for t in done:\n"; fcn += " retCode, lastId, numSent = t.result()\n"; fcn += " return numSent\n"; PyRun_SimpleString(fcn.c_str()); PyObject* mod = PyImport_ImportModule("__main__"); if (mod != NULL) { PyObject* method = PyObject_GetAttrString(mod, "plugin_send_wrapper"); if (method != NULL) { PyObject* arg = Py_BuildValue("OOO", handle, readingsList, plugin_send_module_func); PyObject* pReturn = PyObject_CallObject(method, arg); Logger::getLogger()->debug("%s:%d, pReturn=%p", __FUNCTION__, __LINE__, pReturn); Py_CLEAR(arg); if (pReturn != NULL) { if(PyLong_Check(pReturn)) { numSent = (long)PyLong_AsUnsignedLongMask(pReturn); Logger::getLogger()->debug("numSent=%d", numSent); } else { Logger::getLogger()->warn("plugin_send_wrapper() didn't return a number, returned value is of type %s", (Py_TYPE(pReturn))->tp_name); } Py_CLEAR(pReturn); } else { Logger::getLogger()->debug("%s:%d: pReturn is NULL", __FUNCTION__, __LINE__); if (PyErr_Occurred()) { logErrorMessage(); } } } Py_CLEAR(method); } // Reset error PyErr_Clear(); // Remove references Py_CLEAR(mod); return numSent; } /** * Constructor for PythonPluginHandle */ void *PluginInterfaceInit(const char *pluginName, const char * pluginPathName) { bool initialisePython = false; // Set plugin name, also for methods in common-plugin-interfaces/python gPluginName = pluginName; string 
fledgePythonDir; string fledgeRootDir(getenv("FLEDGE_ROOT")); fledgePythonDir = fledgeRootDir + "/python"; string northRootPath = fledgePythonDir + string(R"(/fledge/plugins/north/)") + string(pluginName); Logger::getLogger()->debug("%s:%d:, northRootPath=%s", __FUNCTION__, __LINE__, northRootPath.c_str()); // Embedded Python 3.5 program name wchar_t *programName = Py_DecodeLocale(pluginName, NULL); Py_SetProgramName(programName); PyMem_RawFree(programName); PythonRuntime::getPythonRuntime(); Logger::getLogger()->debug("%s:%d", __FUNCTION__, __LINE__); // Acquire GIL PyGILState_STATE state = PyGILState_Ensure(); Logger::getLogger()->debug("NorthPlugin %s:%d: " "northRootPath=%s, fledgePythonDir=%s, plugin '%s'", __FUNCTION__, __LINE__, northRootPath.c_str(), fledgePythonDir.c_str(), pluginName); // Set Python path for embedded Python 3.x // Get current sys.path - borrowed reference PyObject* sysPath = PySys_GetObject((char *)"path"); PyList_Append(sysPath, PyUnicode_FromString((char *) northRootPath.c_str())); PyList_Append(sysPath, PyUnicode_FromString((char *) fledgePythonDir.c_str())); // Set sys.argv for embedded Python 3.5 int argc = 2; wchar_t* argv[2]; argv[0] = Py_DecodeLocale("", NULL); argv[1] = Py_DecodeLocale(pluginName, NULL); PySys_SetArgv(argc, argv); // 2) Import Python script PyObject *pModule = PyImport_ImportModule(pluginName); // Check whether the Python module has been imported if (!pModule) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("PluginInterfaceInit: cannot import Python 3.5 script " "'%s' from '%s' : plugin '%s'", pluginName, northRootPath.c_str(), pluginName); } else { std::pair<std::map<string, PythonModule*>::iterator, bool> ret; if (pythonModules) { // Add element ret = pythonModules->insert(pair<string, PythonModule*> (string(pluginName), new PythonModule(pModule, initialisePython, string(pluginName), PLUGIN_TYPE_NORTH, // New Python interpteter not set NULL))); } // Check result if 
(!pythonModules || ret.second == false) { Logger::getLogger()->fatal("%s:%d: python module not added to the map " "of loaded plugins, pModule=%p, plugin '%s'i, aborting.", __FUNCTION__, __LINE__, pModule, pluginName); Py_CLEAR(pModule); return NULL; } else { Logger::getLogger()->debug("%s:%d: python module loaded successfully, pModule=%p, plugin '%s'", __FUNCTION__, __LINE__, pModule, pluginName); } } // Release GIL PyGILState_Release(state); return pModule; } /** * Returns function pointer that can be invoked to call '_sym' function * in python plugin */ void* PluginInterfaceResolveSymbol(const char *_sym, const string& name) { string sym(_sym); if (!sym.compare("plugin_info")) return (void *) plugin_info_fn; else if (!sym.compare("plugin_init")) return (void *) plugin_init_fn; else if (!sym.compare("plugin_shutdown")) return (void *) plugin_shutdown_fn; else if (!sym.compare("plugin_reconfigure")) return (void *) plugin_reconfigure_fn; else if (!sym.compare("plugin_start")) return (void *) plugin_start_fn; else if (!sym.compare("plugin_send")) return (void *) plugin_send_fn; else { Logger::getLogger()->fatal("PluginInterfaceResolveSymbol can not find symbol '%s' " "in the North Python plugin interface library, loaded plugin '%s'", _sym, name.c_str()); return NULL; } } /** * Function to invoke 'plugin_start' function in python plugin * * @param handle Plugin handle from plugin_init_fn */ void plugin_start_fn(PLUGIN_HANDLE handle) { if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_start_fn: " "handle is NULL"); return; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_start_fn, handle '%p'", handle); return; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_start(): " "pModule is NULL, plugin handle '%p'", handle); return; } PyObject* pFunc; // Take GIL 
PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_start"); if (!pFunc) { Logger::getLogger()->info("Cannot find 'plugin_start' method " "in loaded python module '%s'", it->second->m_name.c_str()); PyGILState_Release(state); return; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->info("Cannot call method 'plugin_start' " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); // Release GIL PyGILState_Release(state); return; } // Call Python method passing an object PyObject* pReturn = PyObject_CallFunction(pFunc, "O", handle); Py_CLEAR(pFunc); // Handle return if (!pReturn) { Logger::getLogger()->warn("Called python script method plugin_start : " "error while getting result object, plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); } // Remore result object Py_CLEAR(pReturn); // Release GIL PyGILState_Release(state); } /** * Function to invoke 'plugin_send' function in python plugin * * @param handle Plugin handle from plugin_init_fn * @param readings Vector of readings data to send * * NOTE: currently doesn't work with async plugin_send */ uint32_t plugin_send_fn(PLUGIN_HANDLE handle, const std::vector<Reading *>& readings) { uint32_t numReadingsSent = 0UL; if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_send_fn: " "handle is NULL"); return numReadingsSent; } if (!pythonHandles) { // Plugin name can not be logged here Logger::getLogger()->error("pythonModules map is NULL " "in plugin_send_fn, handle '%p'", handle); return numReadingsSent; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_send(): " "pModule is NULL, plugin handle '%p'", handle); return numReadingsSent; } // We have plugin name 
string pName = it->second->m_name; PyObject* pFunc; // Take GIL PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_send"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find 'plugin_send' " "method in loaded python module '%s'", pName.c_str()); PyGILState_Release(state); return numReadingsSent; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method plugin_send" "in loaded python module '%s'", pName.c_str()); Py_CLEAR(pFunc); // Release GIL PyGILState_Release(state); return numReadingsSent; } // 1. create a ReadingSet ReadingSet set(&readings); // 2. create a PythonReadingSet object PythonReadingSet *pyReadingSet = (PythonReadingSet *) &set; // 3. create PyObject PyObject* readingsList = pyReadingSet->toPython(true); numReadingsSent = call_plugin_send_coroutine(pFunc, handle, readingsList); Logger::getLogger()->debug("C2Py: plugin_send_fn():L%d: filtered readings sent %d", __LINE__, numReadingsSent); set.clear(); // to avoid deletion of contained Reading objects; they are subsequently accessed in calling function DataSender::send() // Remove python object Py_CLEAR(readingsList); Py_CLEAR(pFunc); // Release GIL PyGILState_Release(state); // Return the number of readings sent return numReadingsSent; } }; ================================================ FILE: C/services/notification-plugin-interfaces/python/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(notification-plugin-python-interface) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(DLLIB -ldl) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) # Find source files file(GLOB SOURCES python_plugin_interface.cpp) # Find Python.h 3.x dev/lib package if(${CMAKE_VERSION} VERSION_LESS "3.12.0") 
pkg_check_modules(PYTHON REQUIRED python3) else() find_package(Python3 REQUIRED COMPONENTS Interpreter Development) endif() # Include header files include_directories(include ../../../common/include ../../../services/common/include ../../../services/south/include ../../../thirdparty/rapidjson/include) include_directories(../../../services/common-plugin-interfaces/python/include) # Add Python 3.x header files if(${CMAKE_VERSION} VERSION_LESS "3.12.0") include_directories(${PYTHON_INCLUDE_DIRS}) else() include_directories(${Python3_INCLUDE_DIRS}) endif() if(${CMAKE_VERSION} VERSION_LESS "3.12.0") link_directories(${PYTHON_LIBRARY_DIRS}) else() link_directories(${Python3_LIBRARY_DIRS}) endif() set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) target_link_libraries(${PROJECT_NAME} ${DLLIB}) target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${SERVICE_COMMON_LIB}) # Add Python 3.x library if(${CMAKE_VERSION} VERSION_LESS "3.12.0") target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES}) else() target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES}) endif() set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib) ================================================ FILE: C/services/notification-plugin-interfaces/python/python_plugin_interface.cpp ================================================ /* * Fledge south plugin interface related * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <logger.h> #include <config_category.h> #include <reading.h> #include <mutex> #include <plugin_handle.h> #include <pyruntime.h> #include <Python.h> #include <python_plugin_common_interface.h> #include <pythonreadingset.h> #include <base64dpimage.h> #include <base64databuffer.h> #define 
PY_ARRAY_UNIQUE_SYMBOL PyArray_API_FLEDGE #include <numpy/npy_common.h> #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include <numpy/ndarraytypes.h> #include <numpy/ndarrayobject.h> #undef NUMPY_IMPORT_ARRAY_RETVAL #define NUMPY_IMPORT_ARRAY_RETVAL 0 using namespace std; extern "C" { extern PLUGIN_INFORMATION *Py2C_PluginInfo(PyObject *); extern void logErrorMessage(); extern bool numpyImportError; extern PLUGIN_INFORMATION *plugin_info_fn(); extern PLUGIN_HANDLE plugin_init_fn(ConfigCategory *); extern void plugin_shutdown_fn(PLUGIN_HANDLE); // Reconfigure entry point for rule and delivery plugings void notification_plugin_reconfigure_fn(PLUGIN_HANDLE, const std::string&); // Notification rule plugin entry points std::string plugin_triggers_fn(PLUGIN_HANDLE handle); bool plugin_eval_fn(PLUGIN_HANDLE handle, const std::string& assetValues); std::string plugin_reason_fn(PLUGIN_HANDLE handle); //Notificztion deelivery plugin entry point bool plugin_deliver_fn(PLUGIN_HANDLE handle, const std::string& deliveryName, const std::string& notificationName, const std::string& triggerReason, const std::string& customMessage); // Substitute string values with known data types bool substituteObjects(PyObject *data, vector<PyObject*> &removeObjects); /** * Constructor for PythonPluginHandle * - Set sys.path and sys.argv * - Set plugin_type (notificationRule or notificationDelivery * - Load Python module for the plugin * * @param pluginName The plugin name to load * @param pluginPathName The plugin pathname * @return Python object pointer * of loaded Python shim file * or NULL for errors */ void *PluginInterfaceInit(const char *pluginName, const char * pluginPathName) { bool initPython = false; // Set plugin name for common-plugin-interfaces/python gPluginName = pluginName; // Extract plugin type from path string pluginType = strstr(pluginPathName, PLUGIN_TYPE_NOTIFICATION_RULE) != NULL ? 
PLUGIN_TYPE_NOTIFICATION_RULE : PLUGIN_TYPE_NOTIFICATION_DELIVERY; string appPythonDir; string appRootDir(getenv("FLEDGE_ROOT")); appPythonDir = appRootDir + "/python"; string notificationsRootPath = appPythonDir + string("/fledge/plugins/") + pluginType + "/" + string(pluginName); Logger::getLogger()->error("%s:%d:, filtersRootPath=%s", __FUNCTION__, __LINE__, notificationsRootPath.c_str()); // Get Python runtime PythonRuntime::getPythonRuntime(); // Acquire GIL PyGILState_STATE state = PyGILState_Ensure(); Logger::getLogger()->debug("NotificationPlugin PluginInterfaceInit %s:%d: " "appPythonDir=%s, plugin '%s', type '%s'", __FUNCTION__, __LINE__, appPythonDir.c_str(), pluginName, pluginType.c_str()); // Set Python path for embedded Python 3.x // Get current sys.path - borrowed reference PyObject* sysPath = PySys_GetObject((char *)"path"); PyList_Append(sysPath, PyUnicode_FromString((char *)notificationsRootPath.c_str())); // Set sys.argv for embedded Python 3.5 int argc = 2; wchar_t* argv[2]; argv[0] = Py_DecodeLocale("", NULL); argv[1] = Py_DecodeLocale(pluginName, NULL); PySys_SetArgv(argc, argv); // Import plugin module PyObject *pModule = PyImport_ImportModule(pluginName); Logger::getLogger()->debug("%s:%d: pluginName=%s, type '%s', pModule=%p", __FUNCTION__, __LINE__, pluginName, pluginType.c_str(), pModule); // Check whether the Python module has been imported if (!pModule) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("NotificationPlugin PluginInterfaceInit: " "cannot import Python module file " "from '%s', plugin '%s', type '%s'", pluginPathName, pluginName, pluginType.c_str()); } else { std::pair<std::map<string, PythonModule*>::iterator, bool> ret; PythonModule* newModule = NULL; if (pythonModules) { // Add module into pythonModules, pluginName is the key if ((newModule = new PythonModule(pModule, initPython, string(pluginName), pluginType, NULL)) == NULL) { // Release lock PyGILState_Release(state); 
Logger::getLogger()->fatal("plugin_handle: plugin_init(): " "failed to create Python module " "object, plugin '%s', type '%s'", pluginName, pluginType.c_str()); return NULL; } // Add module to the list of loaded modules ret = pythonModules->insert(pair<string, PythonModule*> (string(pluginName), newModule)); } // Check result if (!pythonModules || ret.second == false) { Logger::getLogger()->fatal("%s:%d: python module " "not added to the map " "of loaded plugins, " "pModule=%p, plugin '%s', type '%s', aborting.", __FUNCTION__, __LINE__, pModule, pluginName, pluginType.c_str()); // Cleanup Py_CLEAR(pModule); pModule = NULL; delete newModule; newModule = NULL; } else { Logger::getLogger()->debug("%s:%d: python module " "successfully loaded, " "pModule=%p, plugin '%s', type '%s'", __FUNCTION__, __LINE__, pModule, pluginName, pluginType.c_str()); } } // Release locks PyGILState_Release(state); // Return new Python module or NULL return pModule; } /** * Returns function pointer that can be invoked to call '_sym' function * in python plugin * * @param _sym Symbol name * @param name Plugin name * @return function pointer to be invoked */ void* PluginInterfaceResolveSymbol(const char *_sym, const string& name) { string sym(_sym); if (!sym.compare("plugin_info")) return (void *) plugin_info_fn; else if (!sym.compare("plugin_init")) return (void *) plugin_init_fn; else if (!sym.compare("plugin_shutdown")) return (void *) plugin_shutdown_fn; else if (!sym.compare("plugin_reconfigure")) return (void *) notification_plugin_reconfigure_fn; else if (!sym.compare("plugin_triggers")) return (void *) plugin_triggers_fn; else if (!sym.compare("plugin_eval")) return (void *) plugin_eval_fn; else if (!sym.compare("plugin_reason")) return (void *) plugin_reason_fn; else if (!sym.compare("plugin_deliver")) return (void *) plugin_deliver_fn; else { Logger::getLogger()->fatal("PluginInterfaceResolveSymbol can not find symbol '%s' " "in the Notification Python plugin interface library, " 
"loaded plugin '%s'", _sym, name.c_str()); return NULL; } } /** * Invoke 'plugin_triggers' function in notification rule python plugin * * Returned JSON data will be used for notification data subscription * to Fledge storage service * * @param handle The plugin handle from plugin_init_fn * @return JSON string with array of * asset name and windom data evaluation */ string plugin_triggers_fn(PLUGIN_HANDLE handle) { string ret = "{\"triggers\" : []}"; if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_triggers(): " "handle is NULL"); return ret; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_triggers_fn, handle '%p'", handle); return ret; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_triggers(): " "pModule is NULL, plugin handle '%p'", handle); return ret; } std::mutex mtx; lock_guard<mutex> guard(mtx); PyGILState_STATE state = PyGILState_Ensure(); PyObject* pFunc; // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_triggers"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find 'plugin_triggers' method " "in loaded python module '%s'", it->second->m_name.c_str()); PyGILState_Release(state); return ret; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method 'plugin_triggers' " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return ret; } // Call Python method passing an object PyObject* pReturn = PyObject_CallFunction(pFunc, "O", handle); Py_CLEAR(pFunc); // Handle return if (!pReturn) { Logger::getLogger()->error("Called python script method 'plugin_triggers' : " "error while getting result object, plugin '%s'", it->second->m_name.c_str()); 
logErrorMessage(); } // Return C++ string ret = string(json_dumps(pReturn)); // Remove objects Py_CLEAR(pReturn); PyGILState_Release(state); return ret; } /** * Function to invoke 'plugin_reason' function in notification rule python plugin * * @param handle The plugin handle from plugin_init_fn * @return JSON string with trigger reason */ std::string plugin_reason_fn(PLUGIN_HANDLE handle) { string ret = "{\"reason\" : \"errored\"}"; if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_reason(): " "handle is NULL"); return ret; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_reason_fn, handle '%p'", handle); return NULL; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_reason(): " "pModule is NULL, plugin handle '%p'", handle); return ret; } std::mutex mtx; lock_guard<mutex> guard(mtx); PyObject* pFunc; PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_reason"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find 'plugin_reason' method " "in loaded python module '%s'", it->second->m_name.c_str()); PyGILState_Release(state); return ret; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method 'plugin_reason' " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return ret; } // Call Python method passing an object PyObject* pReturn = PyObject_CallFunction(pFunc, "O", handle); Py_CLEAR(pFunc); // Handle return if (!pReturn) { Logger::getLogger()->error("Called python script method 'plugin_reason' : " "error while getting result object, plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); } // Get Python object 
ret = string(json_dumps(pReturn)); // REmove objects Py_CLEAR(pReturn); PyGILState_Release(state); return ret; } /** * Function to invoke 'plugin_eval' function in notification rule python plugin * * @param handle The plugin handle from plugin_init_fn * @param assetValues JSON string with asset data to evaluate * @return True if rule evaluation triggers, false otherwise */ bool plugin_eval_fn(PLUGIN_HANDLE handle, const std::string& assetValues) { bool ret = false; if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_eval(): " "handle is NULL"); return ret; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_eval_fn, handle '%p'", handle); return NULL; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_eval(): " "pModule is NULL, plugin handle '%p'", handle); return ret; } std::mutex mtx; lock_guard<mutex> guard(mtx); PyObject* pFunc; PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_eval"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find 'plugin_eval' method " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return ret; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method 'plugin_eval' " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return ret; } // Call Python method passing an object and the data as C string PyObject *evalData = json_loads(assetValues.c_str()); vector<PyObject*> removeObjects; // Replace content of some known string data: // DPImage substituteObjects(evalData, removeObjects); // Call plugin_eval PyObject* pReturn = 
PyObject_CallFunction(pFunc, "OO", handle, evalData); Py_CLEAR(pFunc); // Handle return if (!pReturn) { Logger::getLogger()->error("Called python script method 'plugin_eval' : " "error while getting result object, plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); } else { // check bool abd return true or false if (PyBool_Check(pReturn)) { ret = PyObject_IsTrue(pReturn); } } // REmove objects Py_CLEAR(evalData); Py_CLEAR(pReturn); // Remove any allocated object in substituteObjects() for (auto it = removeObjects.begin(); it != removeObjects.end(); ++it) { Py_CLEAR(*it); } removeObjects.clear(); PyGILState_Release(state); return ret; } /** * Function to invoke 'plugin_reconfigure' function in python plugin * * @param handle The plugin handle from plugin_init_fn * @param config The new configuration, as string */ void notification_plugin_reconfigure_fn(PLUGIN_HANDLE handle, const std::string& config) { if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_reconfigure(): " "handle is NULL"); return; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_reconfigure_fn, handle '%p'", handle); return; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_reconfigure(): " "pModule is NULL, plugin handle '%p'", handle); return; } std::mutex mtx; lock_guard<mutex> guard(mtx); PyGILState_STATE state = PyGILState_Ensure(); Logger::getLogger()->debug("plugin_handle: plugin_reconfigure(): " "pModule=%p, handle=%p, plugin '%s'", it->second->m_module, handle, it->second->m_name.c_str()); PyObject* pFunc; // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_reconfigure"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find method 'plugin_reconfigure' " "in loaded python module '%s'", it->second->m_name.c_str()); 
PyGILState_Release(state); return; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method 'plugin_reconfigure' " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return; } Logger::getLogger()->debug("plugin_reconfigure with %s", config.c_str()); // Create Python object from string PyObject *config_dict = json_loads(config.c_str()); // Call Python method passing the Python object PyObject* pReturn = PyObject_CallFunction(pFunc, "OO", handle, config_dict); Py_CLEAR(pFunc); // Handle returned data if (!pReturn) { Logger::getLogger()->error("Called python script method plugin_reconfigure " ": error while getting result object, plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); } else { PyObject* tmp = (PyObject *)handle; // Check current handle is Dict and pReturn is a Dict too if (PyDict_Check(tmp) && PyDict_Check(pReturn)) { // Clear Dict content PyDict_Clear(tmp); // Populate hadnle Dict with new data in pReturn PyDict_Update(tmp, pReturn); // Remove pReturn ojbect Py_CLEAR(pReturn); Logger::getLogger()->debug("plugin_handle: plugin_reconfigure(): " "got updated handle from python plugin=%p, plugin '%s'", handle, it->second->m_name.c_str()); } else { Logger::getLogger()->error("plugin_handle: plugin_reconfigure(): " "got object type '%s' instead of Python Dict, " "python plugin=%p, plugin '%s'", Py_TYPE(pReturn)->tp_name, handle, it->second->m_name.c_str()); } } PyGILState_Release(state); } /** * Function to invoke 'plugin_deliver' function in * notification deliveryi python plugin * * @param handle The plugin handle from plugin_init_fn * @param handle The plugin handle returned from plugin_init * @param deliveryName The delivery category name * @param notificationName The notification name * @param triggerReason The trigger reason for notification * @param customMessage The message from notification * @return 
True is notification has been delivered, * false otherwise */ bool plugin_deliver_fn(PLUGIN_HANDLE handle, const std::string& deliveryName, const std::string& notificationName, const std::string& triggerReason, const std::string& customMessage) { bool ret = false; if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_deliver(): " "handle is NULL"); return ret; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_deliver_fn, handle '%p'", handle); return ret; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_deliver(): " "pModule is NULL, plugin handle '%p'", handle); return ret; } std::mutex mtx; lock_guard<mutex> guard(mtx); PyObject* pFunc; PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_deliver"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find 'plugin_deliver' method " "in loaded python module '%s'", it->second->m_name.c_str()); PyGILState_Release(state); return ret; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method 'plugin_deliver' " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return ret; } // Transform triggerReason into a Python object PyObject *reason = json_loads(triggerReason.c_str()); // Call Python method passing an object and the data ac C string bu // triggerReason as a Python object PyObject* pReturn = PyObject_CallFunction(pFunc, "OssOs", handle, deliveryName.c_str(), notificationName.c_str(), reason, customMessage.c_str()); Py_CLEAR(pFunc); // Handle return if (!pReturn) { Logger::getLogger()->error("Called python script method 'plugin_deliver' : " "error while getting result object, 
plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); } else { // check bool abd return true or false if (PyBool_Check(pReturn)) { ret = PyObject_IsTrue(pReturn); } } // Remove objects Py_CLEAR(reason); Py_CLEAR(pReturn); PyGILState_Release(state); return ret; } /** * Substitute value for a second level dict in the Pythin object * if DPImage string is found * * { * "TC1" : { * "width" : 256, * "height" : 256, * "depth" : 24, * "img" : "__DPIMAGE:2,2,24_AAAAAAAACAAACAAA" * }, * "timestamp_TC1" : 1643293555.389629 * } * * "img" string value will be substituted by * PyArray_SimpleNewFromData(...) data */ bool substituteObjects(PyObject *data, vector<PyObject*> &removeObjects) { PyObject *dKey, *dValue; Py_ssize_t dPos = 0; // Fetch all Datapoints in 'reading' dict // dKey and dValue are borrowed references while (PyDict_Next(data, &dPos, &dKey, &dValue)) { if (PyDict_Check(dValue)) { PyObject *iKey, *iValue; Py_ssize_t iPos = 0; while (PyDict_Next(dValue, &iPos, &iKey, &iValue)) { if (PyUnicode_Check(iValue)) { string str = PyUnicode_AsUTF8(iValue); string key = PyUnicode_AsUTF8(iKey); if (str[0] == '_' && str[1] == '_') { size_t pos = str.find_first_of(':'); if (str.compare(2, 7, "DPIMAGE") == 0) { PyObject *newImage = NULL; DPImage *image = new Base64DPImage(str.substr(pos + 1)); Logger::getLogger()->debug("Inner key '%s' will be " "substituted with a DPImage of %dx%d@%d", key.c_str(), image->getHeight(), image->getWidth(), image->getDepth()); // Initialise Nunpy array import_array(); if (image->getDepth() == 24) { npy_intp dim[3]; dim[0] = image->getHeight(); dim[1] = image->getWidth(); dim[2] = 3; enum NPY_TYPES type = NPY_UBYTE; // Create Python array wrapper around image data newImage = PyArray_SimpleNewFromData(3, dim, type, image->getData()); } else { npy_intp dim[2]; dim[0] = image->getHeight(); dim[1] = image->getWidth(); enum NPY_TYPES type; bool createImage = true; switch (image->getDepth()) { case 8: type = NPY_UBYTE; break; case 16: type = 
NPY_UINT16; break; case 32: type = NPY_UINT32; break; case 64: type = NPY_UINT64; break; default: createImage = false; break; } if (createImage) { // Create Python array wrapper around image data newImage = PyArray_SimpleNewFromData(2, dim, type, image->getData()); } } if (newImage) { // Replace value PyDict_SetItem(dValue, iKey, newImage); // Add object to remove vector removeObjects.push_back(newImage); } // Delete DPImage object delete(image); } if (str.compare(2, 10, "DATABUFFER") == 0) { PyObject *newImage = NULL; DataBuffer *dbuf = new Base64DataBuffer(str.substr(pos + 1)); npy_intp dim = dbuf->getItemCount(); enum NPY_TYPES type; bool createImage = true; switch (dbuf->getItemSize()) { case 1: type = NPY_UBYTE; break; case 2: type = NPY_UINT16; break; case 4: type = NPY_UINT32; break; case 8: type = NPY_UINT64; break; default: createImage = false; break; } // Initialise Nunpy array import_array(); if (createImage) { // Create Python array wrapper around image data newImage = PyArray_SimpleNewFromData(1, &dim, type, dbuf->getData()); if (newImage) { // Replace value PyDict_SetItem(dValue, iKey, newImage); // Add object to remove vector removeObjects.push_back(newImage); } } // Delete Databiffer object delete(dbuf); } } } } } } return true; } }; // End of extern C ================================================ FILE: C/services/south/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (South) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb -DPy_DEBUG") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(CMAKE_CXX_FLAGS_PROFILING "-O2 -pg") set(DLLIB -ldl) set(UUIDLIB -luuid) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) set(EXEC fledge.services.south) include_directories(. 
include ../../thirdparty/Simple-Web-Server ../../thirdparty/rapidjson/include ../common/include ../../common/include)

find_package(Threads REQUIRED)

set(BOOST_COMPONENTS system thread)
# Late 2017 TODO: remove the following checks and always use std::regex
if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
    if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
        set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex)
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_BOOST_REGEX")
    endif()
endif()
find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED)
include_directories(SYSTEM ${Boost_INCLUDE_DIR})

if(APPLE)
    set(OPENSSL_ROOT_DIR "/usr/local/opt/openssl")
endif()

# Find python3.x dev/lib package
find_package(PkgConfig REQUIRED)
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    pkg_check_modules(PYTHON REQUIRED python3)
else()
    find_package(Python3 REQUIRED COMPONENTS Interpreter Development NumPy)
endif()

file(GLOB south_src "*.cpp")

# Add Python 3.x header files
if(${CMAKE_VERSION} VERSION_LESS "3.12.0")
    include_directories(${PYTHON_INCLUDE_DIRS})
else()
    include_directories(${Python3_INCLUDE_DIRS} ${Python3_NUMPY_INCLUDE_DIRS})
endif()

link_directories(${PROJECT_BINARY_DIR}/../../lib)

add_executable(${EXEC} ${south_src} ${common_src} ${services_src})
target_link_libraries(${EXEC} ${Boost_LIBRARIES})
target_link_libraries(${EXEC} ${CMAKE_THREAD_LIBS_INIT})
target_link_libraries(${EXEC} ${DLLIB})
target_link_libraries(${EXEC} ${UUIDLIB})
target_link_libraries(${EXEC} ${COMMON_LIB})
target_link_libraries(${EXEC} ${SERVICE_COMMON_LIB})

install(TARGETS ${EXEC} RUNTIME DESTINATION fledge/services)

if(MSYS) #TODO: Is MSYS true when MSVC is true?
    target_link_libraries(${EXEC} ws2_32 wsock32)
    if(OPENSSL_FOUND)
        target_link_libraries(${EXEC} ws2_32 wsock32)
    endif()
endif()

# Set profiling flags if 'Profiling' build
if(CMAKE_BUILD_TYPE STREQUAL "Profiling")
    message("Building in Profiling mode")
    set_target_properties(${EXEC} PROPERTIES COMPILE_FLAGS "${CMAKE_CXX_FLAGS_PROFILING}")
    # define 'PROFILING' flag used by service to change directory
    target_compile_definitions(${EXEC} PRIVATE PROFILING=1)
    set(CMAKE_SHARED_LINKED_FLAGS "${CMAKE_SHARED_LINKED_FLAGS} -O2 -pg")
    target_link_libraries(${EXEC} -O2 -pg)
endif()


================================================
FILE: C/services/south/README.rst
================================================
.. |br| raw:: html

   <br />

*********************
Fledge South Service
*********************

This is the south service of the Fledge platform written in C.
This service is responsible for gathering readings and sending them to
the Fledge buffer for storage.

|br| |br|

Building
========

The Storage service is built using cmake, to build the Storage service:
::
  mkdir build
  cd build
  cmake ..
  make

This will create the executable file ``south`` service.

Use the command ``make install`` to install in the default location, note
you will need permission on the installation directory or use the sudo
command. Pass the option *DESTDIR=* to set your own destination into which
to install the Storage service.

Build the plugins by going to the directory *C/plugins/south* and follow
the instructions in each of the plugin directories.

|br| |br|

Prerequisites
=============

To build the South service the machine must have installed the *cmake*
system, *make* and *g++*, plus the libraries for the South plugin, e.g.
the boost libraries On Ubuntu based Linux distributions these can be installed with *apt-get*: :: apt-get install libboost-dev libboost-system-dev libboost-thread-dev apt-get install cmake g++ make |br| |br| Running ======= The South service may be run in daemon mode or interactively by use of the *-d* command line argument. The South service will register with the core to allow the core to monitor the South service and to allow the South storage to find the Storage service. It assumes the core is located on the same machine. This can however be overridden by the use of the command line argument *--port=* and *--address=* to set the port and address of the core microservice. The South service will look for South plugins in the current directory or in the directory *$FLEDGE_ROOT/plugins/south*. |br| |br| ================================================ FILE: C/services/south/include/defaults.h ================================================ #ifndef _DEFAULTS_H #define _DEFAULTS_H /* * Fledge reading ingest. 
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ // The maximum value a user will be allowed to set the maxSendLatency config item expressed in mS #define MAXSENDLATENCY 600000 // 10 minutes // The default advanced configuration items to add to the category static struct { const char *name; const char *displayName; const char *description; const char *type; const char *value; } defaults[] = { { "maxSendLatency", "Maximum Reading Latency (mS)", "Maximum time to spend filling buffer before sending", "integer", "5000" }, { "bufferThreshold", "Maximum buffered Readings", "Number of readings to buffer before sending", "integer", "100" }, { "throttle", "Throttle", "Enable flow control by reducing the poll rate", "boolean", "false" }, { "readingsPerSec", "Reading Rate", "Number of readings to generate per interval", "integer", "1" }, { "assetTrackerInterval", "Asset Tracker Update", "Number of milliseconds between updates of the asset tracker information", "integer", "500" }, { NULL, NULL, NULL, NULL, NULL } }; #endif ================================================ FILE: C/services/south/include/ingest.h ================================================ #ifndef _INGEST_H #define _INGEST_H /* * Fledge reading ingest. 
* * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto, Amandeep Singh Arora */ #include <storage_client.h> #include <reading.h> #include <logger.h> #include <vector> #include <queue> #include <thread> #include <chrono> #include <mutex> #include <sstream> #include <unordered_set> #include <condition_variable> #include <filter_plugin.h> #include <filter_pipeline.h> #include <asset_tracking.h> #include <service_handler.h> #include <set> #include <perfmonitors.h> #define SERVICE_NAME "Fledge South" #define INGEST_SUFFIX "-Ingest" // Suffix for per service ingest statistic #define FLUSH_STATS_INTERVAL 5 // Period between flushing of stats to storage (seconds) #define STATS_UPDATE_FAIL_THRESHOLD 10 // After this many update fails try creating new stats #define DEPRECATED_CACHE_AGE 600 // Maximum allowed aged of the deprecated asset cache /* * Constants related to flow control for async south services. * */ #define AFC_SLEEP_INCREMENT 20 // Number of milliseconds to wait for readings to drain #define AFC_SLEEP_MAX 200 // Maximum sleep tiem in ms between tests #define AFC_MAX_WAIT 5000 // Maximum amount of time we wait for the queue to drain class IngestRate; // Enum for service buffering type enum class ServiceBufferingType { UNLIMITED, LIMITED }; // Enum for discard policy enum class DiscardPolicy { DISCARD_OLDEST, REDUCE_FIDELITY, DISCARD_NEWEST }; #define SERVICE_BUFFER_BUFFER_TYPE_DEFAULT ServiceBufferingType::UNLIMITED #define SERVICE_BUFFER_DISCARD_POLICY_DEFAULT DiscardPolicy::DISCARD_OLDEST #define SERVICE_BUFFER_SIZE_DEFAULT 1000 #define SERVICE_BUFFER_SIZE_MIN 1000 /** * The ingest class is used to ingest asset readings. * It maintains a queue of readings to be sent to storage, * these are sent using a background thread that regularly * wakes up and sends the queued readings. 
*/ class Ingest : public ServiceHandler { public: Ingest(StorageClient& storage, const std::string& serviceName, const std::string& pluginName, ManagementClient *mgmtClient); ~Ingest(); void ingest(const Reading& reading); void ingest(const std::vector<Reading *> *vec); void start(long timeout, unsigned int threshold); bool running(); bool isStopping(); bool isRunning() { return !m_shutdown; }; void processQueue(); void waitForQueue(); size_t queueLength(); void updateStats(void); int createStatsDbEntry(const std::string& assetName); bool loadFilters(const std::string& categoryName); static void passToOnwardFilter(OUTPUT_HANDLE *outHandle, READINGSET* readings); static void useFilteredData(OUTPUT_HANDLE *outHandle, READINGSET* readings); void setTimeout(const long timeout) { m_timeout = timeout; }; void setThreshold(const unsigned int threshold) { m_queueSizeThreshold = threshold; }; void configChange(const std::string&, const std::string&); void configChildCreate(const std::string& , const std::string&, const std::string&){}; void configChildDelete(const std::string& , const std::string&){}; void shutdown() {}; // Satisfy ServiceHandler void restart() {}; // Satisfy ServiceHandler void unDeprecateAssetTrackingRecord(AssetTrackingTuple* currentTuple, const std::string& assetName, const std::string& event); void unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* currentTuple, const std::string& assetName, const std::string&, const unsigned int&); void setStatistics(const std::string& option); std::string getStringFromSet(const std::set<std::string> &dpSet); void setFlowControl(unsigned int lowWater, unsigned int highWater) { m_lowWater = lowWater; m_highWater = highWater; }; void setResourceLimit(ServiceBufferingType serviceBufferingType, unsigned long serviceBufferSize, DiscardPolicy discardPolicy); void flowControl(); void setPerfMon(PerformanceMonitor *mon) { m_performance = mon; }; void configureRateMonitor(long interval, long factor); // Debugger 
entry points bool attachDebugger() { if (m_filterPipeline) { m_debuggerAttached = true; return m_filterPipeline->attachDebugger(); } return false; }; void detachDebugger() { if (m_filterPipeline) { m_debuggerAttached = false; m_debuggerBufferSize = 1; m_filterPipeline->detachDebugger(); } }; void setDebuggerBuffer(unsigned int size) { if (m_filterPipeline) { m_debuggerBufferSize = size; m_filterPipeline->setDebuggerBuffer(size); } }; std::string getDebuggerBuffer() { std::string rval; if (m_filterPipeline) rval = m_filterPipeline->getDebuggerBuffer(); return rval; }; void isolate(bool isolate) { std::lock_guard<std::mutex> guard(m_isolateMutex); m_isolate = isolate; }; bool isolated() { std::lock_guard<std::mutex> guard(m_isolateMutex); return m_isolate; }; bool replayDebugger() { if (m_filterPipeline) { return m_filterPipeline->replayDebugger(); } else { return false; } }; private: void signalStatsUpdate() { // Signal stats thread to update stats std::lock_guard<std::mutex> guard(m_statsMutex); m_statsCv.notify_all(); }; void logDiscardedStat() { std::lock_guard<std::mutex> guard(m_statsMutex); m_discardedReadings++; }; long calculateWaitTime(); int createServiceStatsDbEntry(); void discardOldest(); void discardNewest(); void reduceFidelity(); void enforceResourceLimits(); StorageClient& m_storage; long m_timeout; bool m_shutdown; unsigned int m_queueSizeThreshold; bool m_running; std::string m_serviceName; std::string m_pluginName; ManagementClient *m_mgtClient; // New data: queued std::vector<Reading *>* m_queue; std::mutex m_qMutex; std::mutex m_statsMutex; std::mutex m_pipelineMutex; std::thread* m_thread; std::thread* m_statsThread; Logger* m_logger; std::condition_variable m_cv; std::condition_variable m_statsCv; // Data ready to be filtered/sent std::vector<Reading *>* m_data; std::vector<std::vector<Reading *>*> m_resendQueues; std::queue<std::vector<Reading *>*> m_fullQueues; std::mutex m_fqMutex; unsigned int m_discardedReadings; // discarded readings 
since last update to statistics table FilterPipeline* m_filterPipeline; std::unordered_set<std::string> statsDbEntriesCache; // confirmed stats table entries std::map<std::string, int> statsPendingEntries; // pending stats table entries bool m_highLatency; // Flag to indicate we are exceeding latency request bool m_10Latency; // Latency within 10% time_t m_reportedLatencyTime;// Last tiem we reported high latency int m_failCnt; bool m_storageFailed; int m_storesFailed; int m_statsUpdateFails; enum { STATS_BOTH, STATS_ASSET, STATS_SERVICE } m_statisticsOption; unsigned int m_highWater; unsigned int m_lowWater; AssetTrackingTable *m_deprecated; time_t m_deprecatedAgeOut; time_t m_deprecatedAgeOutStorage; PerformanceMonitor *m_performance; std::mutex m_useDataMutex; IngestRate *m_ingestRate; std::mutex m_isolateMutex; bool m_isolate; bool m_debuggerAttached; unsigned int m_debuggerBufferSize; std::atomic<ServiceBufferingType> m_serviceBufferingType; std::atomic<unsigned int> m_serviceBufferSize; std::atomic<DiscardPolicy> m_discardPolicy; bool m_resourceGovernorActive{false}; // Tracks if the resource governor is active time_t m_lastFidelityReductionTimestamp{0}; // Used for "Reduce Fidelity" }; #endif ================================================ FILE: C/services/south/include/ingest_rate.h ================================================ #ifndef _INGEST_RATE_H #define _INGEST_RATE_H /* * Fledge reading ingest rate. 
*
 * Copyright (c) 2024 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <logger.h>
#include <vector>
#include <queue>
#include <chrono>
#include <mutex>
#include <condition_variable>
#include <filter_plugin.h>
#include <filter_pipeline.h>
#include <asset_tracking.h>
#include <service_handler.h>
#include <set>
#include <ingest.h>

#define IGRSAMPLES	10	// The number of samples used to calculate initial average

/**
 * A class used to track and report on the ingest rates of a data stream
 *
 * It collects the number of readings ingested in a configurable period, the
 * period being defined in minutes. It then calculates the mean of the collection
 * rates and the standard deviation. If the current collection rate differs from
 * the averaged collection rate by more than a configured number of standard
 * deviations an alert is raised. A "priming" mechanism requires two
 * consecutive deviations from the norm before an alert is triggered.
 * This reduces the occurrence of false positives caused by transient spikes in collection.
 * These spikes may occur when heavy operations are performed on the Fledge instance.
 *
 * When the rate returns to within the number of defined standard deviations of
 * the average then the alert is cleared.
 *
 * Before alerting is enabled a number of the defined time periods, IGRSAMPLES,
 * must have passed, during this time an average is calculated and used for the
 * initial comparison.
* * If the user reconfigures the collection rate of the plugin then the accumulated * average is discarded and the process starts again by collecting a new average */ class IngestRate { public: IngestRate(ManagementClient *mgmtClient, const std::string& service); ~IngestRate(); void ingest(unsigned int numberReadings); void periodic(); void updateConfig(int interval, int factor); void relearn(); private: void updateCounters(); private: ManagementClient *m_mgmtClient; std::string m_service; int m_currentInterval; int m_numIntervals; unsigned long m_thisInterval; double m_mean; double m_dsq; unsigned long m_count; double m_factor; std::mutex m_mutex; bool m_alerted; bool m_primed; }; #endif ================================================ FILE: C/services/south/include/south_api.h ================================================ #ifndef _SOUTH_API_H #define _SOUTH_API_H /* * Fledge storage service. * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <logger.h> #include <server_http.hpp> #define SETPOINT "^/fledge/south/setpoint$" #define OPERATION "^/fledge/south/operation$" // Debugger URLs #define DEBUG_ATTACH "^/fledge/south/debug/attach$" #define DEBUG_DETACH "^/fledge/south/debug/detach$" #define DEBUG_BUFFER "^/fledge/south/debug/buffer$" #define DEBUG_ISOLATE "^/fledge/south/debug/isolate$" #define DEBUG_SUSPEND "^/fledge/south/debug/suspend$" #define DEBUG_STEP "^/fledge/south/debug/step$" #define DEBUG_REPLAY "^/fledge/south/debug/replay$" #define DEBUG_STATE "^/fledge/south/debug/state$" class SouthService; typedef std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Response> Response; typedef std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Request> Request; class SouthApi { public: SouthApi(SouthService *); ~SouthApi(); unsigned short getListenerPort(); void setPoint(std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Response> response, 
				std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Request> request);
		void		operation(std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Response> response,
				std::shared_ptr<SimpleWeb::Server<SimpleWeb::HTTP>::Request> request);
		void		startServer();
		// Debugger entry points
		void		attachDebugger(Response response, Request request);
		void		detachDebugger(Response response, Request request);
		void		setDebuggerBuffer(Response response, Request request);
		void		getDebuggerBuffer(Response response, Request request);
		void		isolateDebugger(Response response, Request request);
		void		suspendDebugger(Response response, Request request);
		void		stepDebugger(Response response, Request request);
		void		replayDebugger(Response response, Request request);
		void		stateDebugger(Response response, Request request);
	private:
		SimpleWeb::Server<SimpleWeb::HTTP>
				*m_server;
		SouthService	*m_service;
		std::thread	*m_thread;
		Logger		*m_logger;
};
#endif


================================================
FILE: C/services/south/include/south_plugin.h
================================================
#ifndef _SOUTH_PLUGIN
#define _SOUTH_PLUGIN
/*
 * Fledge south service.
 *
 * Copyright (c) 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <plugin.h>
#include <plugin_manager.h>
#include <config_category.h>
#include <string>
#include <reading_set.h>

typedef void (*INGEST_CB)(void *, Reading);
typedef void (*INGEST_CB2)(void *, std::vector<Reading *>*);

/**
 * Class that represents a south plugin.
 *
 * The purpose of this class is to hide the use of the pointers into the
 * dynamically loaded plugin and wrap the interface into a class that
 * can be used directly in the south subsystem.
 *
 * This is achieved by having a set of private member variables which are
 * the pointers to the functions in the plugin, and a set of public methods
 * that will call these functions via the function pointers.
 */
class SouthPlugin : public Plugin {

public:
	SouthPlugin(PLUGIN_HANDLE handle, const ConfigCategory& category);
	~SouthPlugin();

	Reading		poll();
	ReadingSet*	pollV2();
	void		start();
	void		reconfigure(const std::string&);
	void		shutdown();
	void		registerIngest(INGEST_CB, void *);
	void		registerIngestV2(INGEST_CB2, void *);
	// Option flags advertised by the loaded plugin
	bool		isAsync() { return info->options & SP_ASYNC; };
	bool		hasControl() { return info->options & SP_CONTROL; };
	bool		persistData() { return info->options & SP_PERSIST_DATA; };
	void		startData(const std::string& pluginData);
	std::string	shutdownSaveData();
	bool		write(const std::string& name, const std::string& value);
	bool		operation(const std::string& name, std::vector<PLUGIN_PARAMETER *>& );

private:
	PLUGIN_HANDLE	instance;
	bool		m_started;	// Plugin started indicator, for async plugins
	// Pointers to the entry points within the dynamically loaded plugin
	void		(*pluginStartPtr)(PLUGIN_HANDLE);
	Reading		(*pluginPollPtr)(PLUGIN_HANDLE);
	std::vector<Reading*>*	(*pluginPollPtrV2)(PLUGIN_HANDLE);
	void		(*pluginReconfigurePtr)(PLUGIN_HANDLE*, const std::string& newConfig);
	void		(*pluginShutdownPtr)(PLUGIN_HANDLE);
	void		(*pluginRegisterPtr)(PLUGIN_HANDLE, INGEST_CB, void *);
	void		(*pluginRegisterPtrV2)(PLUGIN_HANDLE, INGEST_CB2, void *);
	std::string	(*pluginShutdownDataPtr)(const PLUGIN_HANDLE);
	void		(*pluginStartDataPtr)(PLUGIN_HANDLE, const std::string& pluginData);
	bool		(*pluginWritePtr)(PLUGIN_HANDLE, const std::string& name, const std::string& value);
	bool		(*pluginOperationPtr)(const PLUGIN_HANDLE, const std::string& name, int count, PLUGIN_PARAMETER **parameters);
};

#endif


================================================
FILE: C/services/south/include/south_service.h
================================================
#ifndef _SOUTH_SERVICE_H
#define _SOUTH_SERVICE_H
/*
 * Fledge south service.
 *
 * Copyright (c) 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <logger.h>
#include <south_plugin.h>
#include <service_handler.h>
#include <config_category.h>
#include <ingest.h>
#include <filter_plugin.h>
#include <plugin_data.h>
#include <audit_logger.h>
#include <perfmonitors.h>

#define MAX_SLEEP	5	// Maximum number of seconds the service will sleep during a poll cycle

#define SERVICE_NAME "Fledge South"

/*
 * Control the throttling of poll based plugins
 *
 * If the ingest queue grows then we reduce the poll rate, i.e. increase
 * the interval between poll calls. If the ingest queue then drops below
 * the threshold set in the advanced configuration we then bring the poll
 * rate back up.
 */
#define SOUTH_THROTTLE_HIGH_PERCENT	50	// Percentage above buffer threshold where we throttle down
#define SOUTH_THROTTLE_LOW_PERCENT	10	// Percentage above buffer threshold where we throttle up
#define SOUTH_THROTTLE_PERCENT		10	// The percentage we throttle poll by
#define SOUTH_THROTTLE_DOWN_INTERVAL	10	// Interval between throttle down attempts
#define SOUTH_THROTTLE_UP_INTERVAL	15	// Interval between throttle up attempts

/**
 * State bits for the south pipeline debugger
 */
#define DEBUG_ATTACHED		0x01
#define DEBUG_SUSPENDED		0x02
#define DEBUG_ISOLATED		0x04

class SouthServiceProvider;

/**
 * The SouthService class. This class is the core
 * of the service that provides south side services
 * to Fledge.
 */
class SouthService : public ServiceAuthHandler {
	public:
		SouthService(const std::string& name,
			const std::string& token = "");
		virtual ~SouthService();
		void			start(std::string& coreAddress,
					      unsigned short corePort);
		void			stop();
		void			shutdown();
		void			restart();
		void			configChange(const std::string&, const std::string&);
		void			processConfigChange(const std::string&, const std::string&);
		void			configChildCreate(const std::string&,
					const std::string&,
					const std::string&){};
		void			configChildDelete(const std::string&,
					const std::string&){};
		bool			isRunning() { return !m_shutdown; };
		bool			setPoint(const std::string& name, const std::string& value);
		bool			operation(const std::string& name, std::vector<PLUGIN_PARAMETER *>& );
		void			setDryRun() { m_dryRun = true; };
		void			handlePendingReconf();
		// Debugger Entry point
		bool			attachDebugger()
					{
						if (m_ingest)
						{
							m_debugState = DEBUG_ATTACHED;
							return m_ingest->attachDebugger();
						}
						return false;
					};
		// Detach clears all debugger state bits as well as detaching
		void			detachDebugger()
					{
						if (m_ingest)
							m_ingest->detachDebugger();
						suspendDebugger(false);
						isolateDebugger(false);
						m_debugState = 0;
					};
		void			setDebuggerBuffer(unsigned int size)
					{
						if (m_ingest)
							m_ingest->setDebuggerBuffer(size);
					};
		std::string		getDebuggerBuffer()
					{
						if (m_ingest)
							return m_ingest->getDebuggerBuffer();
						return "";
					};
		void			suspendDebugger(bool suspend)
					{
						suspendIngest(suspend);
						if (suspend)
							m_debugState |= DEBUG_SUSPENDED;
						else
							m_debugState &= ~(unsigned int)DEBUG_SUSPENDED;
					};
		void			isolateDebugger(bool isolate)
					{
						if (m_ingest)
							m_ingest->isolate(isolate);
						if (isolate)
							m_debugState |= DEBUG_ISOLATED;
						else
							m_debugState &= ~(unsigned int)DEBUG_ISOLATED;
					};
		// Allow the ingest loop to deliver the given number of
		// readings while suspended
		void			stepDebugger(unsigned int steps)
					{
						std::lock_guard<std::mutex> guard(m_suspendMutex);
						m_steps = steps;
					}
		bool			replayDebugger()
					{
						if (m_ingest)
						{
							return m_ingest->replayDebugger();
						}
						else
						{
							return false;
						}
					};
		std::string		debugState();
		bool			debuggerAttached()
					{
						return m_debugState & DEBUG_ATTACHED;
					};
		// Global controls
		bool			allowControl() { return m_controlEnabled; };
		bool			allowDebugger() { return m_debuggerEnabled; };
	private:
		void			addConfigDefaults(DefaultConfigCategory& defaults);
		bool 			loadPlugin();
		int 			createTimerFd(struct timeval rate);
		void 			createConfigCategories(DefaultConfigCategory configCategory,
						std::string parent_name,
						std::string current_name);
		void			throttlePoll();
		void			processNumberList(const ConfigCategory& cateogry,
						const std::string& item,
						std::vector<unsigned long>& list);
		void			calculateTimerRate();
		bool			syncToNextPoll();
		bool			onDemandPoll();
		void			checkPendingReconfigure();
		void			updateFeatures(const ConfigCategory& category);
		void			suspendIngest(bool suspend)
					{
						std::lock_guard<std::mutex> guard(m_suspendMutex);
						m_suspendIngest = suspend;
						m_steps = 0;
					};
		bool			isSuspended()
					{
						std::lock_guard<std::mutex> guard(m_suspendMutex);
						return m_suspendIngest;
					};
		// Consume one step if suspended; returns true when a poll may proceed
		bool			willStep()
					{
						std::lock_guard<std::mutex> guard(m_suspendMutex);
						if (m_suspendIngest && m_steps > 0)
						{
							m_steps--;
							return true;
						}
						return false;
					};
		void			getResourceLimit();
	private:
		std::thread			*m_reconfThread;
		std::deque<std::pair<std::string,std::string>>
						m_pendingNewConfig;
		std::mutex			m_pendingNewConfigMutex;
		std::condition_variable		m_cvNewReconf;
		SouthPlugin			*southPlugin;
		Logger				*logger;
		AssetTracker			*m_assetTracker;
		bool				m_shutdown;
		ConfigCategory			m_config;
		ConfigCategory			m_configAdvanced;
		ConfigCategory			m_configResourceLimit;
		unsigned long			m_readingsPerSec;	// May not be per second, new rate defines time units
		unsigned int			m_threshold;
		unsigned long			m_timeout;
		Ingest				*m_ingest;
		bool				m_throttle;
		bool				m_throttled;
		unsigned int			m_highWater;
		unsigned int			m_lowWater;
		struct timeval			m_lastThrottle;
		struct timeval			m_desiredRate;
		struct timeval			m_currentRate;
		int				m_timerfd;
		const std::string		m_token;
		unsigned int			m_repeatCnt;
		PluginData			*m_pluginData;
		std::string			m_dataKey;
		bool				m_dryRun;
		bool				m_requestRestart;
		std::string			m_rateUnits;
		enum { POLL_INTERVAL, POLL_FIXED, POLL_ON_DEMAND }
						m_pollType;
		std::vector<unsigned long>	m_hours;
		std::vector<unsigned long>	m_minutes;
		std::vector<unsigned long>	m_seconds;
		std::string			m_hoursStr;
		std::string			m_minutesStr;
		std::string			m_secondsStr;
		std::condition_variable		m_pollCV;
		std::mutex			m_pollMutex;
		bool				m_doPoll;
		AuditLogger			*m_auditLogger;
		PerformanceMonitor		*m_perfMonitor;
		bool				m_suspendIngest;
		unsigned int			m_steps;
		std::mutex			m_suspendMutex;
		unsigned int			m_debugState;
		SouthServiceProvider		*m_provider;
		bool				m_controlEnabled;
		bool				m_debuggerEnabled;
		ServiceBufferingType		m_serviceBufferingType;
		unsigned int			m_serviceBufferSize;
		DiscardPolicy			m_discardPolicy;
};

/**
 * A data provider class to return data in the south service ping response
 */
class SouthServiceProvider : public JSONProvider {
	public:
		SouthServiceProvider(SouthService *south) : m_south(south) {};
		virtual ~SouthServiceProvider() {};
		void	asJSON(std::string &json) const
			{
				if (m_south)
				{
					json = "\"debug\" : " + m_south->debugState();
				}
			};
	private:
		SouthService	*m_south;
};
#endif


================================================
FILE: C/services/south/ingest.cpp
================================================
/*
 * Fledge readings ingest.
 *
 * Copyright (c) 2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto, Amandeep Singh Arora
 */
#include <ingest.h>
#include <reading.h>
#include <config_handler.h>
#include <thread>
#include <logger.h>
#include <set>
#include "string_utils.h"
#include <ingest_rate.h>

using namespace std;

/**
 * Thread to process the ingest queue and send the data
 * to the storage layer.
 */
static void ingestThread(Ingest *ingest)
{
	while (!
ingest->isStopping()) { if (ingest->running()) { ingest->waitForQueue(); ingest->processQueue(); } else { std::this_thread::sleep_for(std::chrono::milliseconds(100)); } } } /** * Thread to update statistics table in DB */ static void statsThread(Ingest *ingest) { while (ingest->running()) { ingest->updateStats(); } } /** * Create a row for given assetName in statistics DB table, if not present already * The key checked/created in the table is "<assetName>" * * @param assetName Asset name for the plugin that is sending readings * @return int Return -1 on error, 0 if not required or 1 if the entry exists */ int Ingest::createStatsDbEntry(const string& assetName) { if (m_statisticsOption == STATS_SERVICE) { // No asset stats required return 0; } // Prepare fledge.statistics update string statistics_key = assetName; for (auto & c: statistics_key) c = toupper(c); // SELECT * FROM fledge.statiatics WHERE key = statistics_key const Condition conditionKey(Equals); Where *wKey = new Where("key", conditionKey, statistics_key); Query qKey(wKey); ResultSet* result = 0; try { // Query via storage client result = m_storage.queryTable("statistics", qKey); if (!result->rowCount()) { // Prepare insert values for insertTable InsertValues newStatsEntry; newStatsEntry.push_back(InsertValue("key", statistics_key)); newStatsEntry.push_back(InsertValue("description", string("Readings received from asset ")+assetName)); // Set "value" field for insert using the JSON document object newStatsEntry.push_back(InsertValue("value", 0)); newStatsEntry.push_back(InsertValue("previous_value", 0)); // Do the insert if (!m_storage.insertTable("statistics", newStatsEntry)) { m_logger->error("%s:%d : Insert new row into statistics table failed, newStatsEntry='%s'", __FUNCTION__, __LINE__, newStatsEntry.toJSON().c_str()); delete result; return -1; } } delete result; } catch (...) 
{ m_logger->error("%s:%d : Unable to create new row in statistics table with key='%s'", __FUNCTION__, __LINE__, statistics_key.c_str()); return -1; } return 1; } /** * Create a row for service in the statistics DB table, if not present already * */ int Ingest::createServiceStatsDbEntry() { if (m_statisticsOption == STATS_ASSET) { // No service stats required return 0; } // SELECT * FROM fledge.configuration WHERE key = categoryName const Condition conditionKey(Equals); Where *wKey = new Where("key", conditionKey, m_serviceName + INGEST_SUFFIX); Query qKey(wKey); ResultSet* result = 0; try { // Query via storage client result = m_storage.queryTable("statistics", qKey); if (!result->rowCount()) { // Prepare insert values for insertTable InsertValues newStatsEntry; newStatsEntry.push_back(InsertValue("key", m_serviceName + INGEST_SUFFIX)); newStatsEntry.push_back(InsertValue("description", string("Readings received from service ")+ m_serviceName)); // Set "value" field for insert using the JSON document object newStatsEntry.push_back(InsertValue("value", 0)); newStatsEntry.push_back(InsertValue("previous_value", 0)); // Do the insert if (!m_storage.insertTable("statistics", newStatsEntry)) { m_logger->error("%s:%d : Insert new row into statistics table failed, newStatsEntry='%s'", __FUNCTION__, __LINE__, newStatsEntry.toJSON().c_str()); delete result; return -1; } } delete result; } catch (...) { m_logger->error("%s:%d : Unable to create new row in statistics table with key='%s'", __FUNCTION__, __LINE__, m_serviceName.c_str()); return -1; } return 0; } /** * Update statistics for this south service. Successfully processed * readings are reflected against plugin asset name and READINGS keys. * Discarded readings stats are updated against DISCARDED key. 
 */
void Ingest::updateStats()
{
	unique_lock<mutex> lck(m_statsMutex);
	if (m_running) // don't wait on condition variable if plugin/ingest is being shutdown
		m_statsCv.wait_for(lck, std::chrono::seconds(FLUSH_STATS_INTERVAL));

	// Give the ingest rate monitor its periodic tick
	if (m_ingestRate)
		m_ingestRate->periodic();

	if (statsPendingEntries.empty())
	{
		// Nothing to update
		return;
	}

	int readings=0;
	vector<pair<ExpressionValues *, Where *>> statsUpdates;
	string key;
	const Condition conditionStat(Equals);

	// Build one "value = value + n" update per asset with pending counts,
	// creating the statistics row first if we have not confirmed it exists
	for (auto it = statsPendingEntries.begin(); it != statsPendingEntries.end(); ++it)
	{
		if (statsDbEntriesCache.find(it->first) == statsDbEntriesCache.end())
		{
			if (createStatsDbEntry(it->first) > 0)
			{
				statsDbEntriesCache.insert(it->first);
			}
		}
		if (it->second)
		{
			if (m_statisticsOption == STATS_BOTH || m_statisticsOption == STATS_ASSET)
			{
				// Prepare fledge.statistics update
				key = it->first;
				for (auto & c: key) c = toupper(c);

				// Prepare "WHERE key = name"
				Where *wPluginStat = new Where("key", conditionStat, key);

				// Prepare value = value + inc
				ExpressionValues *updateValue = new ExpressionValues;
				updateValue->push_back(Expression("value", "+", (int) it->second));

				statsUpdates.emplace_back(updateValue, wPluginStat);
			}
			readings += it->second;
		}
	}

	if (readings)
	{
		// Aggregate total goes against the global READINGS key
		Where *wPluginStat = new Where("key", conditionStat, "READINGS");
		ExpressionValues *updateValue = new ExpressionValues;
		updateValue->push_back(Expression("value", "+", (int) readings));
		statsUpdates.emplace_back(updateValue, wPluginStat);

		if (m_statisticsOption == STATS_BOTH || m_statisticsOption == STATS_SERVICE)
		{
			// Per-service total against "<service>" + INGEST_SUFFIX
			Where *wPluginStat = new Where("key", conditionStat, m_serviceName + INGEST_SUFFIX);
			ExpressionValues *updateValue = new ExpressionValues;
			updateValue->push_back(Expression("value", "+", (int) readings));
			statsUpdates.emplace_back(updateValue, wPluginStat);
		}
	}
	if (m_discardedReadings)
	{
		Where *wPluginStat = new Where("key", conditionStat, "DISCARDED");
		ExpressionValues *updateValue = new ExpressionValues;
		updateValue->push_back(Expression("value", "+", (int) m_discardedReadings));
		statsUpdates.emplace_back(updateValue, wPluginStat);
	}

	try
	{
		int rv = m_storage.updateTable("statistics", statsUpdates);

		if (rv < 0)
		{
			// Leave pending entries in place so the counts are retried
			// on the next iteration; after repeated failures attempt to
			// recreate the service row and re-verify asset rows
			if (++m_statsUpdateFails > STATS_UPDATE_FAIL_THRESHOLD)
			{
				Logger::getLogger()->warn("Update of statistics failure has persisted, attempting recovery");
				createServiceStatsDbEntry();
				statsDbEntriesCache.clear();
				m_statsUpdateFails = 0;
			}
			else if (m_statsUpdateFails == 1)
			{
				Logger::getLogger()->warn("Update of statistics failed");
			}
			else
			{
				Logger::getLogger()->warn("Update of statistics still failing");
			}
		}
		else
		{
			// Success: the pending counts have been applied
			m_discardedReadings = 0;
			statsPendingEntries.clear();
		}
	}
	catch (...)
	{
		Logger::getLogger()->info("%s:%d : Statistics table update failed, will retry on next iteration", __FUNCTION__, __LINE__);
	}

	// Free the update expressions and where clauses built above,
	// regardless of whether the update succeeded
	for (auto it = statsUpdates.begin(); it != statsUpdates.end(); ++it)
	{
		delete it->first;
		delete it->second;
	}
}

/**
 * Construct an Ingest class to handle the readings queue.
 * A separate thread is used to send the readings to the
 * storage layer based on time. This thread is created in
 * the constructor and will terminate when the destructor
 * is called.
*
 * @param storage	The storage client to use
 */
Ingest::Ingest(StorageClient& storage,
		const std::string& serviceName,
		const std::string& pluginName,
		ManagementClient *mgmtClient) :
			m_storage(storage),
			m_serviceName(serviceName),
			m_pluginName(pluginName),
			m_mgtClient(mgmtClient),
			m_failCnt(0),
			m_storageFailed(false),
			m_storesFailed(0),
			m_statisticsOption(STATS_BOTH),
			m_highWater(0),
			m_isolate(false),
			m_debuggerAttached(false),
			m_debuggerBufferSize(1)
{
	m_shutdown = false;
	m_running = true;
	m_queue = new vector<Reading *>();
	m_logger = Logger::getLogger();
	m_data = NULL;
	m_discardedReadings = 0;
	m_highLatency = false;

	// populate asset and storage asset tracking cache
	AssetTracker *as = AssetTracker::getAssetTracker();
	as->populateAssetTrackingCache(m_pluginName, "Ingest");
	as->populateStorageAssetTrackingCache();

	// Create the stats entry for the service
	createServiceStatsDbEntry();

	m_filterPipeline = NULL;
	m_deprecated = NULL;
	m_deprecatedAgeOut = 0;
	m_deprecatedAgeOutStorage = 0;
	m_ingestRate = new IngestRate(mgmtClient, serviceName);
}

/**
 * Start the ingest threads
 *
 * @param timeout	Maximum time before sending a queue of readings in milliseconds
 * @param threshold	Length of queue before sending readings
 */
void Ingest::start(long timeout, unsigned int threshold)
{
	m_timeout = timeout;
	m_queueSizeThreshold = threshold;
	m_thread = new thread(ingestThread, this);
	m_statsThread = new thread(statsThread, this);
}

/**
 * Destructor for the Ingest class
 *
 * Sets the running flag to false. This will
 * cause the processing thread to drain the queue
 * and then exit.
 * Once this thread has exited the destructor will
 * return.
 */
Ingest::~Ingest()
{
	m_shutdown = true;
	m_running = false;

	// Cleanup filters before stopping the threads
	{
		lock_guard<mutex> guard(m_pipelineMutex);
		if (m_filterPipeline)
		{
			m_filterPipeline->setShuttingDown();
			m_filterPipeline->cleanupFilters(m_serviceName); // filter's shutdown API could potentially try to feed some new readings using the async ingest mechanism
		}
	}

	// Wake the ingest thread, wait for it to exit, then process any
	// readings that arrived after the thread stopped
	m_cv.notify_one();
	m_thread->join();
	processQueue();

	// Wake the stats thread and flush the final statistics update
	m_statsCv.notify_one();
	m_statsThread->join();
	updateStats();

	// Cleanup any readings left in the various queues
	for (auto& reading : *m_queue)
	{
		delete reading;
	}
	delete m_queue;
	for (auto& q : m_resendQueues)
	{
		for (auto& rq : *q)
		{
			delete rq;
		}
		delete q;
	}
	while (m_fullQueues.size() > 0)
	{
		vector<Reading *> *q = m_fullQueues.front();
		for (auto& rq : *q)
		{
			delete rq;
		}
		delete q;
		m_fullQueues.pop();
	}
	delete m_thread;
	delete m_statsThread;

	// Delete filter pipeline
	{
		lock_guard<mutex> guard(m_pipelineMutex);
		if (m_filterPipeline)
			delete m_filterPipeline;
	}

	if (m_deprecated)
		delete m_deprecated;
	if (m_ingestRate)
		delete m_ingestRate;
}

/**
 * Check if the ingest process is still running.
 * This becomes false when the service is shutdown
 * and is used to allow the queue to drain and then
 * the processing routine to terminate.
 */
bool Ingest::running()
{
	lock_guard<mutex> guard(m_pipelineMutex);
	return m_running;
}

/**
 * Check if a shutdown is requested
 */
bool Ingest::isStopping()
{
	return m_shutdown;
}

/**
 * Set the resource limit for the service
 *
 * @param serviceBufferingType	Buffering type
 * @param serviceBufferSize	Buffer size
 * @param discardPolicy		Discard policy
 */
void Ingest::setResourceLimit(ServiceBufferingType serviceBufferingType, unsigned long serviceBufferSize, DiscardPolicy discardPolicy)
{
	m_serviceBufferingType = serviceBufferingType;
	m_serviceBufferSize = serviceBufferSize;
	m_discardPolicy = discardPolicy;
	// Switching to unlimited buffering stands the governor down immediately
	if(m_resourceGovernorActive && (m_serviceBufferingType == ServiceBufferingType::UNLIMITED))
	{
		Logger::getLogger()->info("Resource governor deactivated: normal flow resumed.");
		m_resourceGovernorActive = false;
	}
	m_logger->info("Set resource limit in Ingest: "
			"Service Buffering Type: '%d', "
			"Service Buffer Size: '%lu', "
			"Discard Policy: '%d'.",
			serviceBufferingType, serviceBufferSize, discardPolicy);
}

/**
 * @brief Discard the oldest (first) reading from the queue.
 *
 * This method removes the oldest reading from the queue when the resource limit
 * is exceeded. It ensures that the queue length stays within the configured limit.
 * Warning: Caller should ensure Thread safety.
 */
void Ingest::discardOldest()
{
	// Check if the queue is not empty
	if (!m_queue->empty())
	{
		// Delete the oldest (front) reading from the queue
		delete m_queue->front();
		// Remove the oldest reading from the queue
		m_queue->erase(m_queue->begin());
		// Log that a reading was discarded for statistics purposes
		logDiscardedStat();
	}
}

/**
 * @brief Discard the newest (last) reading from the queue.
 *
 * This method removes the newest reading from the queue when the resource limit
 * is exceeded. It ensures that the queue length stays within the configured limit.
 * Warning: Caller should ensure Thread safety.
 */
void Ingest::discardNewest()
{
	// Check if the queue is not empty
	if (!m_queue->empty())
	{
		// Delete the newest (back) reading from the queue
		delete m_queue->back();
		// Remove the newest reading from the queue
		m_queue->pop_back();
		// Log that a reading was discarded for statistics purposes
		logDiscardedStat();
	}
}

/**
 * @brief Reduce the fidelity of the queue by discarding every second reading.
 *
 * This method reduces the fidelity of the data in the queue by keeping only
 * readings at even indices and discarding readings at odd indices. It is applied
 * when the resource limit is exceeded and ensures the queue length stays within
 * the configured limit.
 * Warning: Caller should ensure Thread safety.
 */
void Ingest::reduceFidelity()
{
	// Check if the queue is not empty
	if (!m_queue->empty())
	{
		// Create a new temporary queue to store reduced-fidelity data
		std::vector<Reading*> newQueue;
		// Iterate through the queue, keeping only every second reading
		for (size_t i = 0; i < m_queue->size(); i++)
		{
			if (i % 2 == 0)
			{
				// Keep readings at even indices
				newQueue.push_back(m_queue->at(i));
			}
			else
			{
				// Delete readings at odd indices and log the discarded reading
				delete m_queue->at(i);
				logDiscardedStat();
			}
		}
		// Replace the original queue with the reduced-fidelity queue
		m_queue->swap(newQueue);
	}
}

/**
 * @brief Enforce resource limits on the queue.
 *
 * This method ensures that the queue length stays within the configured limits
 * when the buffering policy is set to "Limited." It applies the configured discard
 * policy (e.g., Discard Oldest, Discard Newest, Reduce Fidelity) when the queue
 * length exceeds the specified limit. Logging is performed when the resource
 * governor activates or deactivates.
 *
 * NOTE(review): with REDUCE_FIDELITY a queue of one reading is never shrunk
 * (index 0 is always kept), so this loop could spin if m_serviceBufferSize
 * were 0 — TODO confirm configuration guarantees a minimum buffer size.
 */
void Ingest::enforceResourceLimits()
{
	// Enforce limits while the queue length exceeds the configured limit
	if(queueLength() > m_serviceBufferSize)
	{
		if(!m_resourceGovernorActive)
		{
			// Log that the resource governor is activated
			Logger::getLogger()->warn("Resource governor activated: enforcing resource limits.");
			m_resourceGovernorActive = true;
		}
		unsigned int orignalQueueLength = queueLength();
		while (queueLength() > m_serviceBufferSize)
		{
			// Apply the configured discard policy
			switch (m_discardPolicy)
			{
				case DiscardPolicy::DISCARD_OLDEST:
					discardOldest();
					break;
				case DiscardPolicy::DISCARD_NEWEST:
					discardNewest();
					break;
				case DiscardPolicy::REDUCE_FIDELITY:
					reduceFidelity();
					break;
			}
		}
		Logger::getLogger()->debug("Resource governor applied: original queue length = %u, reduced queue length = %u.", orignalQueueLength, queueLength());
	}

	// Deactivate the resource governor if the queue length drops below half the limit
	if (queueLength() <= m_serviceBufferSize / 2 && m_resourceGovernorActive)
	{
		Logger::getLogger()->info("Resource governor deactivated: normal flow resumed.");
		m_resourceGovernorActive = false;
	}
}

/**
 * Add a reading to the reading queue
 *
 * @param reading	The single reading to ingest
 */
void Ingest::ingest(const Reading& reading)
{
	vector<Reading *> *fullQueue = 0;

	if (m_ingestRate)
		m_ingestRate->ingest(1);
	{
		lock_guard<mutex> guard(m_qMutex);
		m_queue->emplace_back(new Reading(reading));
		// Enforce resource limits
		if (m_serviceBufferingType == ServiceBufferingType::LIMITED)
		{
			enforceResourceLimits();
		}
		// Queue has reached the threshold (or we are shutting down):
		// hand the whole queue over and start a fresh one
		if (m_queue->size() >= m_queueSizeThreshold || m_running == false)
		{
			fullQueue = m_queue;
			m_queue = new vector<Reading *>;
		}
	}
	if (fullQueue)
	{
		lock_guard<mutex> guard(m_fqMutex);
		m_fullQueues.push(fullQueue);
	}
	// NOTE(review): m_fullQueues.size() is read here without holding
	// m_fqMutex — presumably a benign race used only as a wakeup hint
	if (m_fullQueues.size())
		m_cv.notify_all();
	m_performance->collect("queueLength", (long)queueLength());
}

/**
 * Add a set of readings to the reading queue
 *
 * @param vec	A vector of readings to ingest
 */
void Ingest::ingest(const vector<Reading *> *vec)
{
	vector<Reading *> *fullQueue = 0;
	size_t qSize;
	unsigned int nFullQueues = 0;

	if (m_ingestRate)
		m_ingestRate->ingest(vec->size());
	{
		lock_guard<mutex> guard(m_qMutex);
		// Get the readings in the set
		for (auto & rdng : *vec)
		{
			m_queue->emplace_back(rdng);
		}
		// Enforce resource limits
		if (m_serviceBufferingType == ServiceBufferingType::LIMITED)
		{
			enforceResourceLimits();
		}
		if (m_queue->size() >= m_queueSizeThreshold || m_running == false)
		{
			fullQueue = m_queue;
			m_queue = new vector<Reading *>;
		}
		qSize = m_queue->size();
	}
	if (fullQueue)
	{
		lock_guard<mutex> guard(m_fqMutex);
		m_fullQueues.push(fullQueue);
		nFullQueues = m_fullQueues.size();
	}
	else
	{
		lock_guard<mutex> guard(m_fqMutex);
		nFullQueues = m_fullQueues.size();
	}
	// Wake the processing thread if there is a full queue waiting or the
	// current queue is close (3/4) to the threshold
	if (nFullQueues != 0 || qSize > m_queueSizeThreshold * 3 / 4)
	{
		m_cv.notify_all();
	}
	m_performance->collect("queueLength", (long)queueLength());
	m_performance->collect("ingestCount", (long)vec->size());
}

/**
 * Work out how long to wait based on age of oldest queued reading
 * We do this in a separate function so that we can lock the qMutex
 * to access the oldest element in the queue
 *
 * @return the time to wait
 */
long Ingest::calculateWaitTime()
{
	long timeout = m_timeout;
	lock_guard<mutex> guard(m_qMutex);
	if (!m_queue->empty())
	{
		// Reduce the timeout by the age of the oldest queued reading
		Reading *reading = (*m_queue)[0];
		struct timeval tm, now;
		reading->getUserTimestamp(&tm);
		gettimeofday(&now, NULL);
		long ageMS = (now.tv_sec - tm.tv_sec) * 1000 +
			(now.tv_usec - tm.tv_usec) / 1000;
		timeout = m_timeout - ageMS;
	}
	return timeout;
}

/**
 * Wait for a period of time to allow the queue to build
 */
void Ingest::waitForQueue()
{
	if (m_fullQueues.size() > 0 || m_resendQueues.size() > 0)
		return;
	if (m_running && m_queue->size() < m_queueSizeThreshold)
	{
		long timeout = calculateWaitTime();
		if (timeout > 0)
		{
			mutex mtx;
			unique_lock<mutex> lck(mtx);
			m_cv.wait_for(lck,chrono::milliseconds((3 * timeout) / 4));
		}
	}
}

/**
 * Process the queue of readings.
*
* Send them to the storage layer as a block. If the append call
* fails requeue the readings for the next transmission.
*
* In order not to lock the queue for an excessive time a new queue
* is created and the old one moved to a local variable. This minimises
* the time we hold the queue mutex to the time it takes to swap two
* variables.
*/
void Ingest::processQueue()
{
	do {
		/*
		 * If we have some data that has been previously filtered but failed to send,
		 * then first try to send that data before taking anything new off the queue.
		 */
		while (m_resendQueues.size() > 0)
		{
			vector<Reading *> *q = *m_resendQueues.begin();
			if (m_storage.readingAppend(*q) == false)
			{
				// Storage is still failing; only log the first failure to avoid log spam
				if (!m_storageFailed)
					m_logger->info("Still unable to resend buffered data, leaving on resend queue.");
				m_storageFailed = true;
				m_storesFailed++;
				m_failCnt++;
				if (m_failCnt > 5)
				{
					// After 5 consecutive failures start discarding readings from the
					// front of the block, 5 at a time, on the assumption the block
					// itself is what storage is rejecting
					m_logger->info("Too many failures with block of readings. Removing readings from block");
					for (int cnt = 5; cnt > 0 && q->size() > 0; cnt--)
					{
						Reading *reading = q->front();
						m_logger->info("Remove reading: %s", reading->toJSON().c_str());
						delete reading;
						q->erase(q->begin());
						logDiscardedStat();
					}
					// NOTE(review): always reports 5 even if the block held fewer
					// readings — confirm whether the counter should use the actual
					// number removed
					m_performance->collect("removedFromQueue", 5);
					if (q->size() == 0)
					{
						// Block fully discarded; drop it from the resend list
						delete q;
						m_resendQueues.erase(m_resendQueues.begin());
					}
					m_failCnt = 0;
				}
			}
			else
			{
				// Resend succeeded: record stats and asset tracking for the block
				m_performance->collect("storedReadings", (long int)(q->size()));
				if (m_storageFailed)
				{
					m_logger->warn("Storage operational after %d failures", m_storesFailed);
					m_storageFailed = false;
					m_storesFailed = 0;
				}
				m_failCnt = 0;
				std::map<std::string, int> statsEntriesCurrQueue;
				AssetTracker *tracker = AssetTracker::getAssetTracker();
				if (tracker == nullptr)
				{
					// NOTE(review): returning here leaves q on m_resendQueues even
					// though the readings were stored — the block would be resent
					// (duplicated) on the next call. Confirm intended behaviour.
					Logger::getLogger()->error("Failed to initialize asset tracker.");
					return;
				}
				string lastAsset = "";
				int *lastStat = NULL;
				// Map of asset name to the set of datapoint names seen for that asset
				std::map <std::string , std::set<std::string> > assetDatapointMap;
				for (vector<Reading *>::iterator it = q->begin(); it != q->end(); ++it)
				{
					Reading *reading = *it;
					string assetName = reading->getAssetName();
					assetName = escape(assetName);
					const std::vector<Datapoint *> dpVec = reading->getReadingData();
					std::string temp;
					std::set<std::string> tempSet;
					// First sort the individual datapoints,
					// e.g. dp2, dp3, dp1: push them in a set to make them
					// dp1, dp2, dp3
					for ( auto dp : dpVec)
					{
						temp.clear();
						temp.append(dp->getName());
						tempSet.insert(temp);
					}
					temp.clear();
					// Push them in a set so as to avoid duplication of datapoints:
					// a reading of d1, d2, d3 and another d2, d3, d1 — the second
					// contributes nothing new
					for (auto dp: tempSet)
					{
						set<string> &s= assetDatapointMap[assetName];
						if (s.find(dp) == s.end())
						{
							s.insert(dp);
						}
					}
					// Only touch the asset tracker when the asset name changes;
					// consecutive readings usually share an asset
					if (lastAsset.compare(assetName))
					{
						AssetTrackingTuple tuple(m_serviceName, m_pluginName, assetName, "Ingest");
						// Check Asset record exists
						AssetTrackingTuple* res = tracker->findAssetTrackingCache(tuple);
						if (res == NULL)
						{
							// Record not in cache, add it
							tracker->addAssetTrackingTuple(tuple);
						}
						else
						{
							// Possibly un-deprecate asset tracking record
							unDeprecateAssetTrackingRecord(res, assetName, "Ingest");
						}
						lastAsset = assetName;
						lastStat = &(statsEntriesCurrQueue[assetName]);
						(*lastStat)++;
					}
					else if (lastStat)
					{
						(*lastStat)++;
					}
					delete reading;
				}
				// Update the storage asset tracker with the datapoints per asset
				for (auto itr : assetDatapointMap)
				{
					std::set<string> &s = itr.second;
					unsigned int count = s.size();
					StorageAssetTrackingTuple storageTuple(m_serviceName, m_pluginName, itr.first, "store", false, "", count);
					StorageAssetTrackingTuple *ptr = &storageTuple;
					// Update SAsset Tracker database and cache
					tracker->updateCache(s, ptr);
				}
				delete q;
				m_resendQueues.erase(m_resendQueues.begin());
				// Merge per-asset counts into the pending statistics map
				unique_lock<mutex> lck(m_statsMutex);
				for (auto &it : statsEntriesCurrQueue)
				{
					statsPendingEntries[it.first] += it.second;
				}
			}
		}
		{
			// Take the next block of readings: either a previously-filled full
			// queue or (when draining at shutdown is not in progress) a swap
			// of the live queue for an empty one
			lock_guard<mutex> fqguard(m_fqMutex);
			if (m_fullQueues.empty())
			{
				if (!m_shutdown)
				{
					// Block of code to execute holding the mutex
					lock_guard<mutex> guard(m_qMutex);
					std::vector<Reading *> *newQ = new vector<Reading *>;
					m_data = m_queue;
					m_queue = newQ;
				}
			}
			else
			{
				m_data = m_fullQueues.front();
				m_fullQueues.pop();
			}
		}
		/*
		 * Create a ReadingSet from m_data readings if we have filters.
		 *
		 * At this point the m_data vector is cleared so that the only reference to
		 * the readings is in the ReadingSet that is passed along the filter pipeline.
		 *
		 * The final filter in the pipeline will pass the ReadingSet back into the
		 * ingest class where it will repopulate the m_data member.
		 *
		 * We lock the filter pipeline here to prevent it being reconfigured whilst we
		 * process the data. We do this because the qMutex is not good enough here as we
		 * do not hold it, by deliberate policy. As we copy the queue holding the qMutex
		 * and then release it to enable more data to be queued while we process the previous
		 * queue via the filter pipeline and up to the storage layer.
		 */
		{
			lock_guard<mutex> guard(m_pipelineMutex);
			if (m_filterPipeline && !m_filterPipeline->isShuttingDown())
			{
				PipelineElement *firstFilter = m_filterPipeline->getFirstFilterPlugin();
				if (firstFilter)
				{
					// Check whether filters are set before calling ingest
					while (!m_filterPipeline->isReady())
					{
						Logger::getLogger()->warn("Ingest called before " "filter pipeline is ready");
						std::this_thread::sleep_for(std::chrono::milliseconds(150));
					}
					ReadingSet *readingSet = new ReadingSet(m_data);
					m_data->clear();
					m_filterPipeline->execute();	// Set the pipeline executing
					// Pass readingSet to filter chain
					firstFilter->ingest(readingSet);
					m_filterPipeline->completeBranch();	// Main branch has completed
					m_filterPipeline->awaitCompletion();
					/*
					 * If filtering removed all the readings then simply clean up m_data and
					 * return.
					 */
					if (m_data->size() == 0)
					{
						delete m_data;
						m_data = NULL;
						return;
					}
				}
			}
		}
		/*
		 * Check the first reading in the list to see if we are meeting the
		 * latency configuration we have been set
		 */
		if (m_data)
		{
			vector<Reading *>::iterator itr = m_data->begin();
			if (itr != m_data->cend())
			{
				Reading *firstReading = *itr;
				struct timeval tmFirst, tmNow, dur;
				gettimeofday(&tmNow, NULL);
				firstReading->getUserTimestamp(&tmFirst);
				timersub(&tmNow, &tmFirst, &dur);
				// Age of the oldest reading in the block, in milliseconds
				long latency = dur.tv_sec * 1000 + (dur.tv_usec / 1000);
				m_performance->collect("readLatency", latency);
				if (latency > m_timeout && m_highLatency == false)
				{
					m_logger->warn("Current send latency of %ldms exceeds requested maximum latency of %dmS", latency, m_timeout);
					m_highLatency = true;
					m_10Latency = false;
					m_reportedLatencyTime = time(0);
				}
				// NOTE(review): the recovery test divides m_timeout by 1000
				// while the other branches compare latency directly to
				// m_timeout — confirm the intended units here
				else if (latency <= m_timeout / 1000 && m_highLatency)
				{
					m_logger->warn("Send latency now within requested limits");
					m_highLatency = false;
				}
				else if (m_highLatency && latency > m_timeout + (m_timeout / 10) && time(0) - m_reportedLatencyTime > 60)
				{
					// Report again every minute if we are outside the latency
					// target by more than 10%
					m_logger->warn("Current send latency of %ldms still significantly exceeds requested maximum latency of %dmS", latency, m_timeout);
					m_reportedLatencyTime = time(0);
				}
				else if (m_highLatency && latency < m_timeout + (m_timeout / 10) && m_10Latency == false)
				{
					m_logger->warn("Send latency of %ldms is now less than 10%% from target", latency);
					m_10Latency = true;
				}
			}
		}
		/**
		 * 'm_data' vector is ready to be sent to storage service.
		 *
		 * Note: m_data might contain:
		 * - Readings set by the configured service "plugin"
		 * OR
		 * - filtered readings by filter plugins in 'readingSet' object:
		 *	1- values only
		 *	2- some readings removed
		 *	3- New set of readings
		 */
		if (m_data && m_data->size())
		{
			if (m_storage.readingAppend(*m_data) == false)
			{
				// Storage rejected the block: park it on the resend queue
				if (!m_storageFailed)
					m_logger->warn("Failed to write readings to storage layer, queue for resend");
				m_storageFailed = true;
				m_storesFailed++;
				m_performance->collect("resendQueued", (long int)(m_data->size()));
				m_resendQueues.push_back(m_data);
				m_data = NULL;
				m_failCnt = 1;
			}
			else
			{
				m_performance->collect("storedReadings", (long int)(m_data->size()));
				if (m_storageFailed)
				{
					m_logger->warn("Storage operational after %d failures", m_storesFailed);
					m_storageFailed = false;
					m_storesFailed = 0;
				}
				m_failCnt = 0;
				std::map<std::string, int> statsEntriesCurrQueue;
				// Check if this requires addition of a new asset tracker tuple
				// and remove the Readings in the vector.
				// NOTE(review): unlike the resend path above, tracker is used
				// here without a nullptr check — confirm getAssetTracker()
				// cannot return nullptr at this point
				AssetTracker *tracker = AssetTracker::getAssetTracker();
				string lastAsset;
				int *lastStat = NULL;
				std::map <std::string, std::set<std::string> > assetDatapointMap;
				for (vector<Reading *>::iterator it = m_data->begin(); it != m_data->end(); ++it)
				{
					Reading *reading = *it;
					string assetName = reading->getAssetName();
					assetName = escape(assetName);
					const std::vector<Datapoint *> dpVec = reading->getReadingData();
					std::string temp;
					std::set<std::string> tempSet;
					// First sort the individual datapoints,
					// e.g. dp2, dp3, dp1: push them in a set to make them
					// dp1, dp2, dp3
					for ( auto dp : dpVec)
					{
						temp.clear();
						temp.append(dp->getName());
						tempSet.insert(temp);
					}
					temp.clear();
					// Push them in a set so as to avoid duplication of datapoints:
					// a reading of d1, d2, d3 and another d2, d3, d1 — the second
					// contributes nothing new
					for (auto dp: tempSet)
					{
						set<string> &s= assetDatapointMap[assetName];
						if (s.find(dp) == s.end())
						{
							s.insert(dp);
						}
					}
					if (lastAsset.compare(assetName))
					{
						AssetTrackingTuple tuple(m_serviceName, m_pluginName, assetName, "Ingest");
						// Check Asset record exists
						AssetTrackingTuple* res = tracker->findAssetTrackingCache(tuple);
						if (res == NULL)
						{
							// Record not in cache, add it
							tracker->addAssetTrackingTuple(tuple);
						}
						else
						{
							// Un-deprecate asset tracking record
							unDeprecateAssetTrackingRecord(res, assetName, "Ingest");
						}
						lastAsset = assetName;
						lastStat = &statsEntriesCurrQueue[assetName];
						(*lastStat)++;
					}
					else if (lastStat)
					{
						(*lastStat)++;
					}
					// delete reading;
				}
				// Free the stored readings; ownership ended with the successful append
				for( auto & rdng : *m_data)
				{
					delete rdng;
				}
				m_data->clear();
				for (auto itr : assetDatapointMap)
				{
					std::set<string> &s = itr.second;
					unsigned int count = s.size();
					StorageAssetTrackingTuple storageTuple(m_serviceName, m_pluginName, itr.first, "store", false, "", count);
					StorageAssetTrackingTuple *ptr = &storageTuple;
					// Update SAsset Tracker database and cache
					tracker->updateCache(s, ptr);
				}
				{
					// Merge per-asset counts into the pending statistics map
					unique_lock<mutex> lck(m_statsMutex);
					for (auto &it : statsEntriesCurrQueue)
					{
						statsPendingEntries[it.first] += it.second;
					}
				}
			}
		}
		if (m_data)
		{
			delete m_data;
			m_data = NULL;
		}
	} while (! m_fullQueues.empty());
}

/**
 * Load filter plugins
 *
 * Filters found in configuration are loaded
 * and add to the Ingest class instance
 *
 * @param categoryName	Configuration category name
 * @param ingest	The Ingest class reference
 *			Filters are added to m_filters member
 *			False for errors.
* @return True if filters were loaded and initialised * or there are no filters * False with load/init errors */ bool Ingest::loadFilters(const string& categoryName) { Logger::getLogger()->info("Ingest::loadFilters(): categoryName=%s", categoryName.c_str()); /* * We do everything to setup the pipeline using a local FilterPipeline and then assign it * to the service m_filterPipeline once it is setup to guard against access to the pipeline * during setup. * This should not be an issue if the mutex is held, however this approach lessens the risk * in the case of this routine being called when the mutex is not held and ensure m_filterPipeline * only ever points to a fully configured filter pipeline. */ lock_guard<mutex> guard(m_pipelineMutex); FilterPipeline *filterPipeline = new FilterPipeline(m_mgtClient, m_storage, m_serviceName); // Try to load filters: if (!filterPipeline->loadFilters(categoryName)) { // Return false on any error return false; } // Set up the filter pipeline bool rval = filterPipeline->setupFiltersPipeline((void *)passToOnwardFilter, (void *)useFilteredData, this); if (rval) { m_filterPipeline = filterPipeline; // If we previously had a debugger attached then attach to the new pipeline if (m_debuggerAttached) { attachDebugger(); setDebuggerBuffer(m_debuggerBufferSize); } } else { Logger::getLogger()->error("Failed to setup the filter pipeline, the filters are not attached to the service"); filterPipeline->cleanupFilters(categoryName); } return rval; } /** * Pass the current readings set to the next filter in the pipeline * * Note: * This routine must be passed to all filters "plugin_init" except the last one * * Static method * * @param outHandle Pointer to next filter * @param readings Current readings set */ void Ingest::passToOnwardFilter(OUTPUT_HANDLE *outHandle, READINGSET *readingSet) { // Get next filter in the pipeline PipelineElement *next = (PipelineElement *)outHandle; // Pass readings to the next stage in the pipeline 
next->ingest(readingSet); } /** * Use the current input readings (they have been filtered * by all filters) * * The assumption is that one of two things has happened. * * 1. The filtering has all been done in place. In which case * the m_data vector is in the ReadingSet passed in here. * * 2. The filtering has created new ReadingSet in which case * the reading vector must be copied into m_data from the * ReadingSet. * * Note: * This routine must be passed to last filter "plugin_init" only * * Static method * * @param outHandle Pointer to Ingest class instance * @param readingSet Filtered reading set being added to Ingest::m_data */ void Ingest::useFilteredData(OUTPUT_HANDLE *outHandle, READINGSET *readingSet) { Ingest* ingest = (Ingest *)outHandle; if (ingest->isolated()) { delete readingSet; return; } lock_guard<mutex> guard(ingest->m_useDataMutex); vector<Reading *> *newData = readingSet->getAllReadingsPtr(); if (!ingest->m_data) { // If we are called during shutdown there will be no m_data in place // and we create a new one to handle this special case. In this case // the m_data will not be explicitly deleted. However as we are shutting // down this will note cause a problem as all memory is recovered at process // exit time. ingest->m_data = new vector<Reading *>; } ingest->m_data->insert(ingest->m_data->end(), newData->cbegin(), newData->cend()); readingSet->clear(); delete readingSet; } /** * Configuration change for one of the filters or to the pipeline. * * @param category The name of the configuration category * @param newConfig The new category contents */ void Ingest::configChange(const string& category, const string& newConfig) { Logger::getLogger()->debug("Ingest::configChange(): category=%s, newConfig=%s", category.c_str(), newConfig.c_str()); string advanced = m_serviceName + "Advanced"; if (category == m_serviceName) { /** * The category that has changed is the one for the south service itself. 
* The only item that concerns us here is the filter item that defines * the filter pipeline. We extract that item and check to see if it defines * a pipeline that is different to the one we currently have. * * If it is we destroy the current pipeline and create a new one. */ ConfigCategory config("tmp", newConfig); string newPipeline = ""; if (config.itemExists("filter")) { newPipeline = config.getValue("filter"); } { lock_guard<mutex> guard(m_pipelineMutex); if (m_filterPipeline) { if (newPipeline == "" || m_filterPipeline->hasChanged(newPipeline) == false) { Logger::getLogger()->info("Ingest::configChange(): " "filter pipeline is not set or " "it hasn't changed"); return; } /* The new filter pipeline is different to what we have already running * So remove the current pipeline and recreate. */ m_running = false; Logger::getLogger()->info("Ingest::configChange(): " "filter pipeline has changed, " "recreating filter pipeline"); m_filterPipeline->cleanupFilters(m_serviceName); delete m_filterPipeline; m_filterPipeline = NULL; } } /* * We have to setup a new pipeline to match the changed configuration. * Release the lock before reloading the filters as this will acquire * the lock again */ loadFilters(category); // Set m_running holding the lock lock_guard<mutex> guard(m_pipelineMutex); m_running = true; } else if (category == advanced) { ConfigCategory config("tmp", newConfig); string s = config.getValue("rateMonitoringInterval"); long interval = strtol(s.c_str(), NULL, 10); s = config.getValue("rateSigmaFactor"); long factor = strtol(s.c_str(), NULL, 10); m_ingestRate->updateConfig(interval, factor); // TODO If the rate has changed we need to restart the monitoring for // now we trigger this if the category changes m_ingestRate->relearn(); } else { /* * The category is for one of the filters. We simply call the Filter Pipeline * instance and get it to deal with sending the configuration to the right filter. 
* This is done holding the pipeline mutex to prevent the pipeline being changed * during this call and also to hold the ingest thread from running the filters * during reconfiguration. */ Logger::getLogger()->info("Ingest::configChange(): change to config of some filter(s)"); lock_guard<mutex> guard(m_pipelineMutex); if (m_filterPipeline) { m_filterPipeline->configChange(category, newConfig); } } } /** * Return the number of queued readings in the south service */ size_t Ingest::queueLength() { size_t len = m_queue->size(); // Approximate the amount of data in the full queues len += m_fullQueues.size() * m_queueSizeThreshold; len += m_resendQueues.size() * m_queueSizeThreshold; return len; } /** * Load an up-to-date AssetTracking record for the given parameters * and un-deprecate AssetTracking record it has been found as deprecated * Existing cache element is updated * * @param currentTuple Current AssetTracking record for given assetName * @param assetName AssetName to fetch from AssetTracking * @param event The event type to fetch */ void Ingest::unDeprecateAssetTrackingRecord(AssetTrackingTuple* currentTuple, const string& assetName, const string& event) { time_t now = time(0); if (m_deprecatedAgeOut < now) { delete m_deprecated; m_deprecated = m_mgtClient->getDeprecatedAssetTrackingTuples(); m_deprecatedAgeOut = now + DEPRECATED_CACHE_AGE; } if (m_deprecated && m_deprecated->find(assetName)) { // The asset is deprecated possibly m_deprecated->remove(assetName); } else { // The asset is not believed to be deprecated so return. If // it has been deprecated since we last loaded the cache this // will leave the asset incorrectly deprecated. 
This will be // resolved next time the cache is reloaded return; } // Get up-to-date Asset Tracking record AssetTrackingTuple* updatedTuple = m_mgtClient->getAssetTrackingTuple( m_serviceName, assetName, event); bool unDeprecateDataPoints = false; if (updatedTuple) { if (updatedTuple->isDeprecated()) { // Update un-deprecated state in cached object currentTuple->unDeprecate(); m_logger->debug("Asset '%s' is being un-deprecated", assetName.c_str()); // Prepare UPDATE query const Condition conditionParams(Equals); Where * wAsset = new Where("asset", conditionParams, assetName); Where *wService = new Where("service", conditionParams, m_serviceName, wAsset); Where *wEvent = new Where("event", conditionParams, event, wService); InsertValues unDeprecated; // Set NULL value unDeprecated.push_back(InsertValue("deprecated_ts")); // Update storage with NULL value int rv = m_storage.updateTable("asset_tracker", unDeprecated, *wEvent); // Check update operation if (rv < 0) { m_logger->error("Failure while un-deprecating asset '%s'", assetName.c_str()); } else { string audit_details = "{\"asset\" : \"" + assetName + "\", \"service\" : \"" + m_serviceName + "\", \"event\" : \"" + event + "\"}"; // Add AuditLog entry if (!m_mgtClient->addAuditEntry("ASTUN", "INFORMATION", audit_details)) { m_logger->warn("Failure while adding AuditLog entry " \ " for un-deprecated asset '%s'", assetName.c_str()); } m_logger->info("Asset '%s' has been un-deprecated, event '%s'", assetName.c_str(), event.c_str()); unDeprecateDataPoints = true; } } } else { m_logger->error("Failure to get AssetTracking record " "for service '%s', asset '%s'", m_serviceName.c_str(), assetName.c_str()); } delete updatedTuple; // Undeprecate all "store" events related to the serviceName and assetName if (unDeprecateDataPoints) { // Prepare UPDATE query const Condition conditionParams(Equals); Where * wAsset = new Where("asset", conditionParams, assetName); Where *wService = new Where("service", conditionParams, 
m_serviceName, wAsset); Where *wEvent = new Where("event", conditionParams, "store", wService); InsertValues unDeprecated; // Set NULL value unDeprecated.push_back(InsertValue("deprecated_ts")); // Update storage with NULL value int rv = m_storage.updateTable("asset_tracker", unDeprecated, *wEvent); // Check update operation if (rv < 0) { m_logger->error("Failure while un-deprecating asset '%s'", assetName.c_str()); } else { m_logger->info("Asset '%s' has been un-deprecated, event '%s'", assetName.c_str(), "store"); } } } /** * Load an up-to-date StorageAssetTracking record for the given parameters * and un-deprecate StorageAssetTracking record it has been found as deprecated * Existing cache element is updated * * @param currentTuple Current StorageAssetTracking record for given assetName * @param assetName AssetName to fetch from AssetTracking * @param datapoints The datapoints comma separated list * @param count The number of datapoints per asset */ void Ingest::unDeprecateStorageAssetTrackingRecord(StorageAssetTrackingTuple* currentTuple, const string& assetName, const string& datapoints, const unsigned int& count) { time_t now = time(0); if (m_deprecatedAgeOutStorage < now) { m_deprecatedAgeOutStorage = now + DEPRECATED_CACHE_AGE; } else { // Nothing to do right now return; } // Get up-to-date Asset Tracking record StorageAssetTrackingTuple* updatedTuple = m_mgtClient->getStorageAssetTrackingTuple( m_serviceName, assetName, "store", datapoints, count); vector<string> tokens; stringstream dpStringStream(datapoints); string temp; while(getline(dpStringStream, temp, ',')) { tokens.push_back(temp); } ostringstream convert; convert << "{"; convert << "\"datapoints\":["; for (unsigned int i = 0; i < tokens.size() ; ++i) { convert << "\"" << tokens[i].c_str() << "\"" ; if (i < tokens.size()-1){ convert << ","; } } convert << "]," ; convert << "\"count\":" << to_string(count).c_str(); convert << "}"; if (updatedTuple) { if (updatedTuple->isDeprecated()) { // Update 
un-deprecated state in cached object currentTuple->unDeprecate(); m_logger->info("%s:%d, Asset '%s' is being un-deprecated",__FILE__, __LINE__, assetName.c_str()); // Prepare UPDATE query const Condition conditionParams(Equals); Where * wAsset = new Where("asset", conditionParams, assetName); Where *wService = new Where("service", conditionParams, m_serviceName, wAsset); Where *wEvent = new Where("event", conditionParams, "store", wService); Where *wData = new Where("data", conditionParams, JSONescape(convert.str()), wEvent); InsertValues unDeprecated; // Set NULL value unDeprecated.push_back(InsertValue("deprecated_ts")); // Update storage with NULL value int rv = m_storage.updateTable("asset_tracker", unDeprecated, *wData); // Check update operation if (rv < 0) { m_logger->error("%s:%d, Failure while un-deprecating asset '%s'", __FILE__, __LINE__, assetName.c_str()); } } } if (updatedTuple != nullptr) delete updatedTuple; } /** * Set the statistics option. The statistics collection regime may be one of * "per asset", "per service" or "per asset & service". * * @param option The desired statistics collection regime */ void Ingest::setStatistics(const string& option) { unique_lock<mutex> lck(m_statsMutex); if (option.compare("per asset") == 0) m_statisticsOption = STATS_ASSET; else if (option.compare("per service") == 0) m_statisticsOption = STATS_SERVICE; else m_statisticsOption = STATS_BOTH; } /* * Returns comma-separated string from set of datapoints */ std::string Ingest::getStringFromSet(const std::set<std::string> &dpSet) { std::string s; for (auto itr: dpSet) { s.append(itr); s.append(","); } // remove the last comma if (s[s.size() -1] == ',') s.pop_back(); return s; } /** * Implement flow control backoff for the async ingest mechanism. * * The flow control is "soft" in that it will only wait for a maximum * amount of time before continuing regardless of the queue length. * * The mechanism is to have a high water and low water mark. 
When the queue * get longer than the high water mark we wait until the queue drains below * the low water mark before proceeding. * * The wait is done with a backoff algorithm that start at AFC_SLEEP_INCREMENT * and doubles each time we have not dropped below the low water mark. It will * sleep for a maximum of AFC_SLEEP_MAX before testing again. */ void Ingest::flowControl() { if (m_highWater == 0) // No flow control { return; } if (m_highWater < queueLength()) { m_logger->debug("Waiting for ingest queue to drain"); int total = 0, delay = AFC_SLEEP_INCREMENT; while (total < AFC_MAX_WAIT && queueLength() > m_lowWater) { this_thread::sleep_for(chrono::milliseconds(delay)); total += delay; delay *= 2; if (delay > AFC_SLEEP_MAX) { delay = AFC_SLEEP_MAX; } } m_logger->debug("Ingest queue has %s", queueLength() > m_lowWater ? "failed to drain in sufficient time" : "has drained"); m_performance->collect("flow controlled", total); } } /** * Configure the ingest rate class with the collection interval and * the sigma factor allowed before reporting * * @param interval Number of minutes to average ingest stats over * @param factor Number of standard deviations to allow before reporting */ void Ingest::configureRateMonitor(long interval, long factor) { m_ingestRate->updateConfig(interval, factor); } ================================================ FILE: C/services/south/ingestRate.cpp ================================================ /* * Fledge readings ingest rate. 
* * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <ingest_rate.h> #include <thread> #include <logger.h> using namespace std; /** * Constructor for ingest rate class * * @param mgmtClient The management Client interface */ IngestRate::IngestRate(ManagementClient *mgmtClient, const std::string& service) : m_mgmtClient(mgmtClient), m_service(service), m_currentInterval(0), m_thisInterval(0), m_mean(0), m_dsq(0), m_count(0), m_factor(3), m_alerted(false), m_primed(false) { m_numIntervals = 60 / FLUSH_STATS_INTERVAL; } /** * Destructor for the ingest rate class */ IngestRate::~IngestRate() { } /** * Update the configuration of the ingest rate mechanism * * @param interval Number of minutes to average over * @param factor Number of standard deviations to tolarate */ void IngestRate::updateConfig(int interval, int factor) { bool restart = false; if (interval * 60 != m_numIntervals * FLUSH_STATS_INTERVAL) { m_numIntervals = (interval * 60) / FLUSH_STATS_INTERVAL; restart = true; } if (m_factor != factor) { m_factor = factor; } if (restart) { relearn(); } } /** * The configuration has changed so we need to reset our state * and go back into the mode of determining what a good mean and * standard deviation is for the select interval. */ void IngestRate::relearn() { lock_guard<mutex> guard(m_mutex); m_count = 0; m_thisInterval = 0; m_currentInterval = 0; m_dsq = 0; m_mean = 0; } /** * Called each time we ingest any readings. * * @param numberReadings The number of readings ingested */ void IngestRate::ingest(unsigned int numberReadings) { if (m_numIntervals == 0) return; lock_guard<mutex> guard(m_mutex); m_thisInterval += numberReadings; } /** * Called periodically by the stats update thread. 
Called every FLUSH_STATS_INTERVAL seconds */ void IngestRate::periodic() { if (m_numIntervals == 0) return; updateCounters(); } /** * The periodic work that needs to be done holding the mutex */ void IngestRate::updateCounters() { lock_guard<mutex> guard(m_mutex); m_currentInterval++; if (m_currentInterval == m_numIntervals) { if (m_count > IGRSAMPLES) { Logger::getLogger()->debug("Ingest rate checking for service %s is enabled", m_service.c_str()); double sigma = sqrt(m_dsq / m_count); if (m_thisInterval < (m_mean - (m_factor * sigma)) || m_thisInterval > (m_mean + (m_factor * sigma))) { if (m_primed) { // Outlier detected string key = "SouthIngestRate" + m_service; string message = "Ingest rate of the south service " + m_service + " falls outside of normal boundaries"; m_mgmtClient->raiseAlert(key, message); Logger::getLogger()->warn("Current ingest rate falls outside normal boundaries, rate is %ld with average rate of %f", m_thisInterval, m_mean); m_alerted = true; } else { // We have had one outlier, prime the alert on the second consequtive outlier m_primed = true; } } else if (m_alerted) { string key = "SouthIngestRate" + m_service; m_mgmtClient->clearAlert(key); m_primed = false; m_alerted = false; } else { m_primed = false; } } m_count++; double meanDiff = (m_thisInterval - m_mean) / m_count; double newMean = m_mean + meanDiff; double dsqInc = (m_thisInterval - newMean) * (m_thisInterval - m_mean); m_dsq += dsqInc; m_mean = newMean; m_thisInterval = 0; m_currentInterval = 0; } } ================================================ FILE: C/services/south/south.cpp ================================================ /* * Fledge south service. 
*
* Copyright (c) 2018 OSisoft, LLC
*
* Released under the Apache 2.0 Licence
*
* Author: Mark Riddoch, Massimiliano Pinto
*/
#include <sys/timerfd.h>
#include <time.h>
#include <stdint.h>
#include <stdlib.h>
#include <signal.h>
#include <execinfo.h>
#include <dlfcn.h>    // for dladdr
#include <cxxabi.h>   // for __cxa_demangle
#include <unistd.h>
#include <south_service.h>
#include <south_api.h>
#include <management_api.h>
#include <storage_client.h>
#include <service_record.h>
#include <plugin_manager.h>
#include <plugin_api.h>
#include <plugin.h>
#include <logger.h>
#include <reading.h>
#include <ingest.h>
#include <iostream>
#include <defaults.h>
#include <filter_plugin.h>
#include <config_handler.h>
#include <syslog.h>
#include <pyruntime.h>

#define SERVICE_TYPE "Southbound"
#define RESOURCE_LIMIT_CATEGORY "RESOURCE_LIMIT"

extern int makeDaemon(void);
extern void handler(int sig);
static void reconfThreadMain(void *arg);

using namespace std;

// Displays service information in JSON format
static void printServiceInfoAsJSON()
{
	static std::string serviceInfoJSON = R"({"name":"South Service","description":"Service To Ingress Data","type":")" + std::string(SERVICE_TYPE) + R"(","process":"south_c","process_script":"[\"services/south_c\"]","startup_priority":100})";
	std::cout << serviceInfoJSON << std::endl;
}

/**
 * South service main entry point
 *
 * Parses the command line, optionally daemonizes the process and then
 * constructs and starts the SouthService; start() blocks until shutdown.
 */
int main(int argc, char *argv[])
{
unsigned short corePort = 8082;
string coreAddress = "localhost";
bool daemonMode = true;
string myName = SERVICE_NAME;
string logLevel = "warning";
string token = "";
bool dryrun = false;

	// Install the fatal-signal handler that logs a backtrace
	signal(SIGSEGV, handler);
	signal(SIGILL, handler);
	signal(SIGBUS, handler);
	signal(SIGFPE, handler);
	signal(SIGABRT, handler);

	for (int i = 1; i < argc; i++)
	{
		if (!strcmp(argv[i], "--info"))
		{
			// Print service metadata and exit immediately
			printServiceInfoAsJSON();
			return 0;
		}
		if (!strcmp(argv[i], "-d"))
		{
			daemonMode = false;
		}
		else if (!strncmp(argv[i], "--port=", 7))
		{
			corePort = (unsigned short)strtol(&argv[i][7], NULL, 10);
		}
		else if (!strncmp(argv[i], "--name=", 7))
		{
			myName = &argv[i][7];
		}
		else if (!strncmp(argv[i], "--address=", 10))
		{
			coreAddress = &argv[i][10];
		}
		else if (!strncmp(argv[i], "--logLevel=", 11))
		{
			logLevel = &argv[i][11];
		}
		else if (!strncmp(argv[i], "--token=", 8))
		{
			token = &argv[i][8];
		}
		// NOTE(review): strncmp with length 8 also matches "--dryrunXYZ";
		// harmless in practice but strcmp would be exact
		else if (!strncmp(argv[i], "--dryrun", 8))
		{
			dryrun = true;
		}
	}
#ifdef PROFILING
	// Profiling builds write gmon-style output into a per-service directory
	char profilePath[200]{0};
	if (getenv("FLEDGE_DATA"))
	{
		snprintf(profilePath, sizeof(profilePath), "%s/%s_Profile", getenv("FLEDGE_DATA"), myName.c_str());
	}
	else if (getenv("FLEDGE_ROOT"))
	{
		snprintf(profilePath, sizeof(profilePath), "%s/data/%s_Profile", getenv("FLEDGE_ROOT"), myName.c_str());
	}
	else
	{
		snprintf(profilePath, sizeof(profilePath), "/usr/local/fledge/data/%s_Profile", myName.c_str());
	}
	mkdir(profilePath, 0777);
	chdir(profilePath);
#endif
	if (daemonMode && makeDaemon() == -1)
	{
		// Failed to run in daemon mode
		cout << "Failed to run as deamon - proceeding in interactive mode." << endl;
	}

	SouthService *service = new SouthService(myName, token);
	if (dryrun)
	{
		service->setDryRun();
	}
	Logger *logger = Logger::getLogger();
	logger->setMinLevel(logLevel);
	// Start the service. This will only return when the service is shutdown
	service->start(coreAddress, corePort);
	delete service;
	delete logger;
	return 0;
}

/**
 * Detach the process from the terminal and run in the background.
 */
int makeDaemon()
{
pid_t pid;

	/* Make the child process inherit the log level */
	int logmask = setlogmask(0);
	/* create new process */
	if ((pid = fork() ) == -1)
	{
		return -1;
	}
	else if (pid != 0)
	{
		// Parent exits; the child carries on as the daemon
		exit (EXIT_SUCCESS);
	}
	setlogmask(logmask);

	// If we got here we are a child process

	// create new session and process group
	if (setsid() == -1)
	{
		return -1;
	}

	// Close stdin, stdout and stderr
	close(0);
	close(1);
	close(2);
	// redirect fd's 0,1,2 to /dev/null
	(void)open("/dev/null", O_RDWR);	// stdin
	if (dup(0) == -1) {}			// stdout	Workaround for GCC bug 66425 produces warning
	if (dup(0) == -1) {}			// stderr	WOrkaround for GCC bug 66425 produces warning
	return 0;
}

/**
 * Fatal-signal handler: log the signal and a demangled stack backtrace,
 * then exit. Installed for SIGSEGV/SIGILL/SIGBUS/SIGFPE/SIGABRT in main().
 *
 * @param sig	The signal number that was trapped
 */
void handler(int sig)
{
Logger *logger = Logger::getLogger();
void *array[20];
char buf[1024];
int size;

	// get void*'s for all entries on the stack
	size = backtrace(array, 20);

	// print out all the frames to stderr
	logger->fatal("Signal %d (%s) trapped:\n", sig, strsignal(sig));
	char **messages = backtrace_symbols(array, size);
	for (int i = 0; i < size; i++)
	{
		Dl_info info;
		if (dladdr(array[i], &info) && info.dli_sname)
		{
			char *demangled = NULL;
			int status = -1;
			// C++ mangled names start with an underscore; try to demangle
			if (info.dli_sname[0] == '_')
				demangled = abi::__cxa_demangle(info.dli_sname, NULL, 0, &status);
			snprintf(buf, sizeof(buf), "%-3d %*p %s + %zd---------",
					i, int(2 + sizeof(void*) * 2), array[i],
					status == 0 ? demangled :
					info.dli_sname == 0 ? messages[i] : info.dli_sname,
					(char *)array[i] - (char *)info.dli_saddr);
			free(demangled);
		}
		else
		{
			snprintf(buf, sizeof(buf), "%-3d %*p %s---------",
					i, int(2 + sizeof(void*) * 2), array[i], messages[i]);
		}
		logger->fatal("(%d) %s", i, buf);
	}
	free(messages);
	exit(1);
}

/**
 * Callback called by south plugin to ingest readings into Fledge
 *
 * @param ingest	The ingest class to use
 * @param reading	The Reading to ingest
 */
void doIngest(Ingest *ingest, Reading reading)
{
	ingest->ingest(reading);
}

/**
 * Callback called by a V2 async south plugin to ingest a whole ReadingSet.
 * Takes ownership of the set; its readings are moved into the Ingest queue
 * and flow control backoff is applied afterwards.
 *
 * @param ingest	The ingest class to use
 * @param set		The ReadingSet to ingest (consumed)
 */
void doIngestV2(Ingest *ingest, ReadingSet *set)
{
	std::vector<Reading *> *vec = set->getAllReadingsPtr();
	if (!vec)
	{
		Logger::getLogger()->info("%s:%d: V2 async ingest method: vec is NULL", __FUNCTION__, __LINE__);
		return;
	}

	// move reading vector from set to new vector vec2
	std::vector<Reading *> *vec2 = set->moveAllReadings();
	Logger::getLogger()->debug("%s:%d: V2 async ingest method returned: vec->size()=%d", __FUNCTION__, __LINE__, vec->size());
	ingest->ingest(vec2);
	delete vec2;	// each reading object inside vector has been allocated on heap and moved to Ingest class's internal queue
	delete set;
	ingest->flowControl();
}

/**
 * Constructor for the south service
 */
SouthService::SouthService(const string& myName, const string& token) :
				southPlugin(NULL),
				m_assetTracker(NULL),
				m_shutdown(false),
				m_readingsPerSec(1),
				m_throttle(false),
				m_throttled(false),
				m_token(token),
				m_repeatCnt(1),
				m_pluginData(NULL),
				m_dryRun(false),
				m_requestRestart(false),
				m_auditLogger(NULL),
				m_perfMonitor(NULL),
				m_suspendIngest(false),
				m_steps(0),
				m_provider(NULL),
				m_controlEnabled(true),
				m_debuggerEnabled(true)
{
	m_name = myName;
	m_type = SERVICE_TYPE;
	m_pollType = POLL_INTERVAL;
	logger = new Logger(myName);
	logger->setMinLevel("warning");
	// Background thread that applies configuration changes asynchronously
	m_reconfThread = new std::thread(reconfThreadMain, this);
}

/**
 * Destructor for south service
 */
SouthService::~SouthService()
{
	m_cvNewReconf.notify_all();	// Wakeup the reconfigure thread to terminate it
	m_reconfThread->join();
	delete m_reconfThread;
	if
(m_pluginData)
		delete m_pluginData;
	if (m_perfMonitor)
		delete m_perfMonitor;
	delete m_assetTracker;
	delete m_auditLogger;
	delete m_mgtClient;
	delete m_provider;
	// We would like to shutdown the Python environment if it
	// was running. However this causes a segmentation fault within Python
	// so we currently can not do this
#if PYTHON_SHUTDOWN
	PythonRuntime::shutdown();	// Shutdown and release Python resources
#endif
}

/**
 * Start the south service
 *
 * Registers with the core, loads the configured south plugin, creates the
 * Ingest pipeline and then runs the data gathering loop (polled or async)
 * until shutdown or restart is requested.
 *
 * @param coreAddress	Address of the Fledge core service
 * @param corePort	Port of the Fledge core service
 */
void SouthService::start(string& coreAddress, unsigned short corePort)
{
	unsigned short managementPort = (unsigned short)0;
	ManagementApi management(SERVICE_NAME, managementPort);	// Start management API
	logger->info("Starting south service...");
	m_provider = new SouthServiceProvider(this);
	management.registerProvider(m_provider);
	management.registerService(this);

	// Listen for incoming management requests
	management.start();

	// Create the south API
	SouthApi *api = new SouthApi(this);
	if (!api)
	{
		logger->fatal("Unable to create API object");
		return;
	}

	// Allow time for the listeners to start before we register
	sleep(1);
	if (! m_shutdown)
	{
		unsigned short sport = api->getListenerPort();
		// Now register our service
		// TODO proper hostname lookup
		unsigned short managementListener = management.getListenerPort();
		ServiceRecord record(m_name,			// Service name
				SERVICE_TYPE,			// Service type
				"http",				// Protocol
				"localhost",			// Listening address
				sport,				// Service port
				managementListener,		// Management port
				m_token);			// Token

		// Allocate and save ManagementClient object
		m_mgtClient = new ManagementClient(coreAddress, corePort);

		// Create the audit logger instance
		m_auditLogger = new AuditLogger(m_mgtClient);

		// Create an empty South category if one doesn't exist
		DefaultConfigCategory southConfig(string("South"), string("{}"));
		southConfig.setDescription("South");
		m_mgtClient->addCategory(southConfig, true);

		// Get configuration for service name
		m_config = m_mgtClient->getCategory(m_name);
		m_configResourceLimit = m_mgtClient->getCategory(RESOURCE_LIMIT_CATEGORY);
		if (!loadPlugin())
		{
			logger->fatal("Failed to load south plugin %s, exiting...", m_name.c_str());
			string key = m_name + "LoadPlugin";
			m_mgtClient->raiseAlert(key, "South service " + m_name + " is shutting down due to a failure loading the south plugin");
			management.stop();
			return;
		}
		if (southPlugin->hasControl())
		{
			logger->info("South plugin has a control facility, adding south service API");
		}
		if (!m_dryRun)
		{
			if (!m_mgtClient->registerService(record))
			{
				logger->error("Failed to register service %s", m_name.c_str());
				management.stop();
				return;
			}
			ConfigCategory features = m_mgtClient->getCategory("FEATURES");
			updateFeatures(features);

			// Register for category content changes
			ConfigHandler *configHandler = ConfigHandler::getInstance(m_mgtClient);
			configHandler->registerCategory(this, m_name);
			configHandler->registerCategory(this, m_name+"Advanced");
			configHandler->registerCategory(this, "FEATURES");
			configHandler->registerCategory(this, RESOURCE_LIMIT_CATEGORY);
		}

		// Get a handle on the storage layer
		ServiceRecord storageRecord("Fledge Storage");
		if (!m_mgtClient->getService(storageRecord))
		{
			logger->fatal("Unable to find storage service");
			return;
		}
		logger->info("Connect to storage on %s:%d",
				storageRecord.getAddress().c_str(),
				storageRecord.getPort());
		StorageClient storage(storageRecord.getAddress(),
				storageRecord.getPort());
		storage.registerManagement(m_mgtClient);
		m_perfMonitor = new PerformanceMonitor(m_name, &storage);
		unsigned int threshold = 100;
		long timeout = 5000;
		std::string pluginName;
		try {
			if (m_configAdvanced.itemExists("bufferThreshold"))
				threshold = (unsigned int)strtol(m_configAdvanced.getValue("bufferThreshold").c_str(), NULL, 10);
			if (m_configAdvanced.itemExists("maxSendLatency"))
				timeout = strtol(m_configAdvanced.getValue("maxSendLatency").c_str(), NULL, 10);
			if (m_config.itemExists("plugin"))
				pluginName = m_config.getValue("plugin");
			if (m_configAdvanced.itemExists("logLevel"))
			{
				string prevLogLevel = logger->getMinLevel();
				logger->setMinLevel(m_configAdvanced.getValue("logLevel"));
				PluginManager *manager = PluginManager::getInstance();
				PLUGIN_TYPE type = manager->getPluginImplType(southPlugin->getHandle());
				logger->debug("%s:%d: plugin type = %s", __FUNCTION__, __LINE__, (type==PYTHON_PLUGIN)?"PYTHON_PLUGIN":"BINARY_PLUGIN");
				if (type == PYTHON_PLUGIN)
				{
					// propagate loglevel changes to python filters/plugins, if present
					logger->debug("prevLogLevel=%s, m_configAdvanced.getValue(\"logLevel\")=%s",
							prevLogLevel.c_str(), m_configAdvanced.getValue("logLevel").c_str());
					if (prevLogLevel.compare(m_configAdvanced.getValue("logLevel")) != 0)
					{
						logger->debug("calling southPlugin->reconfigure() for updating loglevel");
						southPlugin->reconfigure("logLevel");
					}
				}
			}
			if (m_configAdvanced.itemExists("throttle"))
			{
				string throt = m_configAdvanced.getValue("throttle");
				// Treat any value starting with 't' or 'T' as true
				if (throt[0] == 't' || throt[0] == 'T')
				{
					m_throttle = true;
					m_highWater = threshold + (((float)threshold * SOUTH_THROTTLE_HIGH_PERCENT) / 100.0);
					m_lowWater = threshold + (((float)threshold * SOUTH_THROTTLE_LOW_PERCENT) / 100.0);
					logger->info("Throttling is enabled, high water mark is set to %ld", m_highWater);
				}
				else
				{
					m_throttle = false;
				}
			}
		} catch (ConfigItemNotFound& e) {
			logger->info("Defaulting to inline defaults for south configuration");
		}
		m_assetTracker = new AssetTracker(m_mgtClient, m_name);
		if (m_configAdvanced.itemExists("assetTrackerInterval"))
		{
			string interval = m_configAdvanced.getValue("assetTrackerInterval");
			unsigned long i = strtoul(interval.c_str(), NULL, 10);
			if (m_assetTracker)
				m_assetTracker->tune(i);
		}

		{
			// Instantiate the Ingest class. Note this is stack scoped so that
			// it is destroyed before the management API when we leave this block
			Ingest ingest(storage, m_name, pluginName, m_mgtClient);
			ingest.setPerfMon(m_perfMonitor);
			m_ingest = &ingest;
			if (m_throttle)
			{
				m_ingest->setFlowControl(m_lowWater, m_highWater);
			}
			if (m_configAdvanced.itemExists("statistics"))
			{
				m_ingest->setStatistics(m_configAdvanced.getValue("statistics"));
			}
			if (m_configAdvanced.itemExists("perfmon"))
			{
				string perf = m_configAdvanced.getValue("perfmon");
				if (perf.compare("true") == 0)
					m_perfMonitor->setCollecting(true);
				else
					m_perfMonitor->setCollecting(false);
			}
			if (m_configAdvanced.itemExists("rateMonitoringInterval") && m_configAdvanced.itemExists("rateSigmaFactor"))
			{
				string s = m_configAdvanced.getValue("rateMonitoringInterval");
				long interval = strtol(s.c_str(), NULL, 10);
				s = m_configAdvanced.getValue("rateSigmaFactor");
				long factor = strtol(s.c_str(), NULL, 10);
				ingest.configureRateMonitor(interval, factor);
			}
			getResourceLimit();
			m_ingest->setResourceLimit(m_serviceBufferingType, m_serviceBufferSize, m_discardPolicy);
			m_ingest->start(timeout, threshold);	// Start the ingest threads running

			try {
				m_readingsPerSec = 1;
				if (m_configAdvanced.itemExists("readingsPerSec"))
					m_readingsPerSec = (unsigned long)strtol(m_configAdvanced.getValue("readingsPerSec").c_str(), NULL, 10);
				if (m_readingsPerSec < 1)
				{
					logger->warn("Invalid setting of reading rate, defaulting to 1");
					m_readingsPerSec = 1;
				}
			} catch (ConfigItemNotFound& e) {
				logger->info("Defaulting to inline default for poll interval");
			}

			// Load filter plugins and set them in the Ingest class
			if (!ingest.loadFilters(m_name))
			{
				string errMsg("'" + m_name + "' plugin: failed loading filter plugins.");
				Logger::getLogger()->fatal((errMsg + " Shutting down south service.").c_str());
				string key = m_name + "LoadPipeline";
				m_mgtClient->raiseAlert(key, "South service " + m_name + " is shutting down due to a failure to create the data pipeline");
				return;
			}

			if (southPlugin->persistData())
			{
				m_pluginData = new PluginData(new StorageClient(storageRecord.getAddress(),
						storageRecord.getPort()));
				m_dataKey = m_name + m_config.getValue("plugin");
			}

			// Create default security category
			this->createSecurityCategories(m_mgtClient, m_dryRun);

			if (!m_dryRun)	// If not a dry run then handle readings
			{
				// Get and ingest data
				if (! southPlugin->isAsync())
				{
					// Polled (synchronous) plugin: drive polls from a timer FD
					calculateTimerRate();
					m_timerfd = createTimerFd(m_desiredRate); // interval to be passed is in usecs
					m_currentRate = m_desiredRate;
					if (m_timerfd < 0)
					{
						logger->fatal("Could not create timer FD");
						return;
					}

					int pollCount = 0;
					struct timespec start, end;
					if (clock_gettime(CLOCK_MONOTONIC, &start) == -1)
						Logger::getLogger()->error("polling loop start: clock_gettime");

					const char *pluginInterfaceVer = southPlugin->getInfo()->interface;
					bool pollInterfaceV2 = (pluginInterfaceVer[0]=='2' && pluginInterfaceVer[1]=='.');
					logger->info("pollInterfaceV2=%s", pollInterfaceV2?"true":"false");

					/*
					 * Start the plugin. If it fails with an exception, retry the start with a delay
					 * That delay starts at 500mS and will backoff to 1 minute
					 *
					 * We will continue to retry the start until the service is shutdown
					 */
					bool started = false;
					int delay = 500;
					while (started == false && m_shutdown == false)
					{
						if (southPlugin->persistData())
						{
							Logger::getLogger()->debug("Plugin persists data");
							string pluginData = m_pluginData->loadStoredData(m_dataKey);
							try {
								southPlugin->startData(pluginData);
								started = true;
							} catch (...) {
								Logger::getLogger()->debug("Plugin start raised an exception");
							}
						}
						else
						{
							Logger::getLogger()->debug("Plugin does not persist data");
							started = true;
						}
						if (!started)
						{
							std::this_thread::sleep_for(std::chrono::milliseconds(delay));
							if (delay < 60 * 1000)	// Backoff the delay to 1 minute
							{
								delay *= 2;
							}
						}
					}

					// Main poll loop: runs until shutdown is requested
					while (!m_shutdown)
					{
						uint64_t exp = 0;
						ssize_t s;
						if (m_pollType == POLL_FIXED)
						{
							if (syncToNextPoll())
								exp = 1;	// Perform one poll
						}
						else if (m_pollType == POLL_INTERVAL)
						{
							long rep = m_repeatCnt;
							while (rep > 0)
							{
								s = read(m_timerfd, &exp, sizeof(uint64_t));
								if ((unsigned int)s != sizeof(uint64_t))
									logger->error("timerfd read()");
								// NOTE(review): 'exp' is uint64_t but is formatted with %d —
								// confirm against the Logger's format handling
								if (exp > 100 && exp > m_readingsPerSec/2)
									logger->error("%d expiry notifications accumulated", exp);
								rep--;
								if (m_shutdown)
								{
									break;
								}
								checkPendingReconfigure();
								if (rep > m_repeatCnt)
								{
									// Reconfigure has resulted in more frequent
									// polling
									rep = m_repeatCnt;
								}
							}
						}
						else if (m_pollType == POLL_ON_DEMAND)
						{
							if (onDemandPoll())
								exp = 1;
						}
						if (m_shutdown)
						{
							break;
						}
#if DO_CATCHUP
						for (uint64_t i=0; i<exp; i++)
#endif
						{
							bool doPoll = true;
							if (isSuspended())
							{
								// Ingest is suspended; only poll if single stepping
								doPoll = false;
								if (willStep())
								{
									doPoll = true;
								}
							}
							if (doPoll && (!pollInterfaceV2)) // v1 poll method
							{
								Reading reading = southPlugin->poll();
								if (reading.getDatapointCount())
								{
									ingest.ingest(reading);
								}
								++pollCount;
							}
							else if (doPoll)// V2 poll method
							{
								checkPendingReconfigure();
								ReadingSet *set = southPlugin->pollV2();
								if (set)
								{
									std::vector<Reading *> *vec = set->getAllReadingsPtr();
									if (!vec)
									{
										Logger::getLogger()->info("%s:%d: V2 poll method: vec is NULL", __FUNCTION__, __LINE__);
										continue;
									}
									// move reading vector from set to vec2
									std::vector<Reading *> *vec2 = set->moveAllReadings();
									ingest.ingest(vec2);
									pollCount += (int) vec2->size();
									delete vec2; // each reading object inside vector has been allocated on heap and moved to Ingest class's internal queue
									delete set;
								}
							}
							else
							{
								checkPendingReconfigure();
							}
							throttlePoll();
						}
					}
					if (clock_gettime(CLOCK_MONOTONIC, &end) == -1)
						Logger::getLogger()->error("polling loop end: clock_gettime");
					int secs = end.tv_sec - start.tv_sec;
					int nsecs = end.tv_nsec - start.tv_nsec;
					if (nsecs < 0)
					{
						// Borrow a second for the subtraction
						secs--;
						nsecs += 1000000000;
					}
					Logger::getLogger()->info("%d readings generated in %d.%d secs", pollCount, secs, nsecs);
					close(m_timerfd);
				}
				else
				{
					// Asynchronous plugin: register an ingest callback and
					// then simply wait for shutdown
					const char *pluginInterfaceVer = southPlugin->getInfo()->interface;
					bool pollInterfaceV2 = (pluginInterfaceVer[0]=='2' && pluginInterfaceVer[1]=='.');
					Logger::getLogger()->info("pluginInterfaceVer=%s, pollInterfaceV2=%s", pluginInterfaceVer, pollInterfaceV2?"true":"false");
					if (!pollInterfaceV2)
						southPlugin->registerIngest((INGEST_CB)doIngest, &ingest);
					else
						southPlugin->registerIngestV2((INGEST_CB2)doIngestV2, &ingest);

					// Start the plugin, retrying with an exponential backoff
					// (1 second up to 1 minute) until started or shutdown
					bool started = false;
					int backoff = 1000;
					while (started == false && m_shutdown == false)
					{
						try {
							if (southPlugin->persistData())
							{
								string pluginData = m_pluginData->loadStoredData(m_dataKey);
								Logger::getLogger()->debug("Plugin persists data, %s", pluginData.c_str());
								southPlugin->startData(pluginData);
							}
							else
							{
								Logger::getLogger()->debug("Plugin does not persist data");
								southPlugin->start();
							}
							started = true;
						} catch (...) {
							Logger::getLogger()->debug("Plugin start raised an exception");
							std::this_thread::sleep_for(std::chrono::milliseconds(backoff));
							if (backoff < 60000)
							{
								backoff *= 2;
							}
						}
					}
					while (!m_shutdown)
					{
						std::this_thread::sleep_for(std::chrono::milliseconds(1000));
					}
				}
			}
			else
			{
				m_shutdown = true;
				Logger::getLogger()->info("Dryrun of service, shutting down");
			}

			// Shutdown the API
			delete api;

			// do plugin shutdown before destroying Ingest object on stack
			if (southPlugin)
			{
				if (southPlugin->persistData())
				{
					string data = southPlugin->shutdownSaveData();
					// NOTE(review): m_dataKey is a std::string passed for a %s
					// specifier without .c_str() — confirm the Logger handles this
					Logger::getLogger()->debug("Persist plugin data, key: '%s' data: '%s' service name: '%s'",
							m_dataKey, data.c_str(), m_name.c_str());
					m_pluginData->persistPluginData(m_dataKey, data, m_name);
				}
				else
				{
					southPlugin->shutdown();
				}
				delete southPlugin;
				southPlugin = NULL;
			}
		}

		// Clean shutdown, unregister the storage service
		if (!m_dryRun)
		{
			if (m_requestRestart)
			{
				m_mgtClient->restartService();
			}
			else
			{
				m_mgtClient->unregisterService();
			}
		}
	}
	management.stop();
	logger->info("South service shutdown %s completed", m_dryRun ? "from dry run " : "");
}

/**
 * @brief Retrieves and processes resource limit configuration for the South Service
 *
 * This function reads the resource limit configuration values from the service configuration,
 * validates them, and sets the corresponding member variables. The function handles three main
 * configuration parameters:
 *
 * 1. Service Buffering Type (Unlimited or Limited)
 * 2. Service Buffer Size (minimum value enforced)
 * 3. Discard Policy (Discard Oldest, Discard Newest, or Reduce Fidelity)
 *
 * If any configuration value is invalid or cannot be parsed, the function logs an error and
 * applies default values to ensure the service can continue running.
*
 * @throws std::exception Catches any exceptions during configuration parsing and applies defaults
 */
void SouthService::getResourceLimit()
{
	// Render a DiscardPolicy enum value as the human readable string used in configuration
	auto discardPolicyToString = [](DiscardPolicy policy) -> std::string {
		switch (policy) {
			case DiscardPolicy::DISCARD_OLDEST:
				return "Discard Oldest";
			case DiscardPolicy::REDUCE_FIDELITY:
				return "Reduce Fidelity";
			case DiscardPolicy::DISCARD_NEWEST:
				return "Discard Newest";
			default:
				throw std::invalid_argument("Invalid DiscardPolicy enum value");
		}
	};
	// Render a ServiceBufferingType enum value as the human readable string used in configuration
	auto serviceBufferingTypeToString = [](ServiceBufferingType type) -> std::string {
		switch (type) {
			case ServiceBufferingType::UNLIMITED:
				return "Unlimited";
			case ServiceBufferingType::LIMITED:
				return "Limited";
			default:
				throw std::invalid_argument("Invalid ServiceBufferingType enum value");
		}
	};

	// Update the resource limit configuration
	try
	{
		// Parse the service buffering type
		std::string serviceBuffering = m_configResourceLimit.getValue("serviceBuffering");
		if (serviceBuffering == "Unlimited")
		{
			m_serviceBufferingType = ServiceBufferingType::UNLIMITED;
		}
		else if (serviceBuffering == "Limited")
		{
			m_serviceBufferingType = ServiceBufferingType::LIMITED;
		}
		else
		{
			m_serviceBufferingType = SERVICE_BUFFER_BUFFER_TYPE_DEFAULT; // Default value
			Logger::getLogger()->error("Invalid 'Service Buffering Type' configuration value: '%s'. Default value '%s' has been applied.",
					serviceBuffering.c_str(), serviceBufferingTypeToString(m_serviceBufferingType).c_str());
		}

		// Parse and validate the service buffer size
		try
		{
			std::string bufferSizeStr = m_configResourceLimit.getValue("serviceBufferSize");
			m_serviceBufferSize = (unsigned int)std::stoi(bufferSizeStr); // Convert to integer
			if (m_serviceBufferSize < SERVICE_BUFFER_SIZE_MIN)
			{
				m_serviceBufferSize = SERVICE_BUFFER_SIZE_DEFAULT; // Default value
				Logger::getLogger()->error("Invalid 'Service Buffer Size' value: '%s'. The value must be at least %d. Default value of %d has been applied.",
						bufferSizeStr.c_str(), SERVICE_BUFFER_SIZE_MIN, m_serviceBufferSize);
			}
		}
		catch (const std::exception& e)
		{
			// Handle conversion errors and out-of-range values
			m_serviceBufferSize = SERVICE_BUFFER_SIZE_DEFAULT; // Default value
			Logger::getLogger()->error("Failed to parse 'serviceBufferSize': %s. Default value '%d' has been applied.",
					e.what(), m_serviceBufferSize);
		}

		// Parse the discard policy
		std::string discardPolicy = m_configResourceLimit.getValue("discardPolicy");
		if (discardPolicy == "Discard Oldest")
		{
			m_discardPolicy = DiscardPolicy::DISCARD_OLDEST;
		}
		else if (discardPolicy == "Discard Newest")
		{
			m_discardPolicy = DiscardPolicy::DISCARD_NEWEST;
		}
		else if (discardPolicy == "Reduce Fidelity")
		{
			m_discardPolicy = DiscardPolicy::REDUCE_FIDELITY;
		}
		else
		{
			m_discardPolicy = SERVICE_BUFFER_DISCARD_POLICY_DEFAULT; // Default value
			Logger::getLogger()->error("Invalid 'Discard Policy' configuration value: '%s'. Default value '%s' has been applied.",
					discardPolicy.c_str(), discardPolicyToString(m_discardPolicy).c_str());
		}

		Logger::getLogger()->info("Resource Limit configuration applied successfully: "
				"Service Buffering Type: '%s', "
				"Service Buffer Size: '%d', "
				"Discard Policy: '%s'.",
				serviceBufferingTypeToString(m_serviceBufferingType).c_str(),
				m_serviceBufferSize,
				discardPolicyToString(m_discardPolicy).c_str());
	}
	catch (const std::exception& e)
	{
		// Catch any other exceptions and log the error
		Logger::getLogger()->error("Failed to update resource limit configuration due to an exception: %s. Default values will be applied to ensure system stability.", e.what());

		// Set default values to ensure the system can continue running
		m_serviceBufferingType = SERVICE_BUFFER_BUFFER_TYPE_DEFAULT;
		m_serviceBufferSize = SERVICE_BUFFER_SIZE_DEFAULT;
		m_discardPolicy = SERVICE_BUFFER_DISCARD_POLICY_DEFAULT;

		Logger::getLogger()->info("Default configuration applied: "
				"Service Buffering Type: '%s', "
				"Service Buffer Size: '%d', "
				"Discard Policy: '%s'.",
				serviceBufferingTypeToString(m_serviceBufferingType).c_str(),
				m_serviceBufferSize,
				discardPolicyToString(m_discardPolicy).c_str());
	}
}

/**
 * Stop the south service
 */
void SouthService::stop()
{
	logger->info("Stopping south service...\n");
}

/**
 * Creates config categories and sub categories recursively, along with their parent-child relations
 *
 * @param configCategory	The default configuration to create categories from
 * @param parent_name		The name of the parent category to attach to
 * @param current_name		The name of the category being created
 */
void SouthService::createConfigCategories(DefaultConfigCategory configCategory, std::string parent_name, std::string current_name)
{
	// Deal with registering and fetching the configuration
	DefaultConfigCategory defConfig(configCategory);
	defConfig.setDescription(current_name);	// TODO We do not have access to the description

	// Split the category into the sub-category items and the rest
	DefaultConfigCategory defConfigCategoryOnly(defConfig);
	defConfigCategoryOnly.keepItemsType(ConfigCategory::ItemType::CategoryType);
	defConfig.removeItemsType(ConfigCategory::ItemType::CategoryType);

	// Create/Update category name (we pass keep_original_items=true)
	m_mgtClient->addCategory(defConfig, true);

	// Add this service under 'South' parent category
	vector<string> children;
	children.push_back(current_name);
	m_mgtClient->addChildCategories(parent_name, children);

	// Adds sub categories to the configuration
	bool extracted = true;
	ConfigCategory subCategory;
	while (extracted)
	{
		extracted = subCategory.extractSubcategory(defConfigCategoryOnly);
		if (extracted)
		{
			DefaultConfigCategory defSubCategory(subCategory);
			// Recurse with this category as the parent
			createConfigCategories(defSubCategory, current_name, subCategory.getName());
			// Cleans the category
			subCategory.removeItems();
			subCategory = ConfigCategory() ;
		}
	}
}

/**
 * Load the configured south plugin
 *
 * TODO Should search for the plugin in specified locations
 *
 * @return true if the plugin was loaded and instantiated, false otherwise
 */
bool SouthService::loadPlugin()
{
	try {
		PluginManager *manager = PluginManager::getInstance();
		if (! m_config.itemExists("plugin"))
		{
			logger->error("Unable to fetch plugin name from configuration.\n");
			return false;
		}
		string plugin = m_config.getValue("plugin");
		logger->info("Loading south plugin %s.", plugin.c_str());
		PLUGIN_HANDLE handle;
		if ((handle = manager->loadPlugin(plugin, PLUGIN_TYPE_SOUTH)) != NULL)
		{
			// Adds categories and sub categories to the configuration
			DefaultConfigCategory defConfig(m_name, manager->getInfo(handle)->config);
			createConfigCategories(defConfig, string("South"), m_name);

			// Must now reload the configuration to obtain any items added from
			// the plugin
			// Removes all the m_items already present in the category
			m_config.removeItems();
			m_config = m_mgtClient->getCategory(m_name);
			m_config.addItem("mgmt_client_url_base", "Management client host and port",
					"string", "127.0.0.1:0", m_mgtClient->getUrlbase());
			try {
				southPlugin = new SouthPlugin(handle, m_config);
			} catch (...)
{
				// Plugin construction failed
				return false;
			}

			// Deal with registering and fetching the advanced configuration
			string advancedCatName = m_name+string("Advanced");
			DefaultConfigCategory defConfigAdvanced(advancedCatName, string("{}"));
			addConfigDefaults(defConfigAdvanced);
			defConfigAdvanced.setDescription(m_name+string(" advanced config params"));

			// Create/Update category name (we pass keep_original_items=true)
			m_mgtClient->addCategory(defConfigAdvanced, true);

			// Add this service under 'm_name' parent category
			vector<string> children1;
			children1.push_back(advancedCatName);
			m_mgtClient->addChildCategories(m_name, children1);

			// Must now reload the merged configuration
			m_configAdvanced = m_mgtClient->getCategory(advancedCatName);

			return true;
		}
	} catch (exception& e) {
		logger->fatal("Failed to load south plugin: %s\n", e.what());
	}
	return false;
}

/**
 * Shutdown request
 */
void SouthService::shutdown()
{
	/* Stop receiving new requests and allow existing
	 * requests to drain.
	 */
	if (m_pollType == POLL_ON_DEMAND)
	{
		// Wake any on-demand poll that is waiting so it can observe the shutdown
		lock_guard<mutex> lk(m_pollMutex);
		m_shutdown = true;
		m_pollCV.notify_all();
	}
	else
	{
		m_shutdown = true;
	}
	logger->info("South service shutdown in progress.");
}

/**
 * Restart request
 */
void SouthService::restart()
{
	/* Stop receiving new requests and allow existing
	 * requests to drain.
	 */
	m_requestRestart = true;
	m_shutdown = true;
	logger->info("South service shutdown for restart in progress.");
}

/**
 * Configuration change notification
 *
 * @param categoryName	Category name
 * @param category	Category value
 */
void SouthService::processConfigChange(const string& categoryName, const string& category)
{
	logger->info("Configuration change in category %s: %s", categoryName.c_str(), category.c_str());
	if (categoryName.compare(m_name) == 0)
	{
		m_config = ConfigCategory(m_name, category);
		try {
			southPlugin->reconfigure(category);
		}
		catch (...) {
			logger->fatal("Unrecoverable failure during South plugin reconfigure, south service exiting...");
			shutdown();
		}
		// Let ingest class check for changes to filter pipeline
		m_ingest->configChange(categoryName, category);
	}
	if (categoryName.compare(m_name+"Advanced") == 0)
	{
		// Propogate advanced configuration changes to the ingest class always
		m_ingest->configChange(categoryName, category);
		m_configAdvanced = ConfigCategory(m_name+"Advanced", category);
		if (m_configAdvanced.itemExists("statistics"))
		{
			m_ingest->setStatistics(m_configAdvanced.getValue("statistics"));
		}
		if (m_configAdvanced.itemExists("perfmon"))
		{
			string perf = m_configAdvanced.getValue("perfmon");
			if (perf.compare("true") == 0)
				m_perfMonitor->setCollecting(true);
			else
				m_perfMonitor->setCollecting(false);
		}
		if (! southPlugin->isAsync())
		{
			// Polled plugin: apply any change in the polling regime
			try {
				unsigned long newval = (unsigned long)strtol(m_configAdvanced.getValue("readingsPerSec").c_str(), NULL, 10);
				if (newval < 1)
				{
					logger->warn("Invalid setting of reading rate, defaulting to 1");
					m_readingsPerSec = 1;
				}
				string units = m_configAdvanced.getValue("units");
				string pollType = m_configAdvanced.getValue("pollType");
				bool wakeup = false;
				if (m_pollType == POLL_ON_DEMAND)
				{
					// We are currently blocked waiting for an on demand poll;
					// the poll loop must be woken after the mode change
					wakeup = true;
				}
				if (pollType.compare("Fixed Times") == 0)
				{
					m_pollType = POLL_FIXED;
					processNumberList(m_configAdvanced, "pollHours", m_hours);
					processNumberList(m_configAdvanced, "pollMinutes", m_minutes);
					processNumberList(m_configAdvanced, "pollSeconds", m_seconds);
					// Fill in default minutes/seconds lists where only coarser units were given
					if (m_minutes.size() == 0 && m_hours.size() != 0)
						m_minutes.push_back(0);
					if (m_seconds.size() == 0 && m_minutes.size() != 0)
						m_seconds.push_back(0);
					m_desiredRate.tv_sec = 1;
					m_desiredRate.tv_usec = 0;
					if (wakeup)
					{
						// Wakup from on demand polling
						m_pollCV.notify_all();
					}
				}
				else if (pollType.compare("Interval") == 0 && (newval != m_readingsPerSec || m_rateUnits.compare(units) != 0))
				{
					// Rate or units changed: rebuild the timer FD
					m_pollType = POLL_INTERVAL;
					m_readingsPerSec = newval;
					m_rateUnits = units;
					close(m_timerfd);
					calculateTimerRate();
					m_currentRate = m_desiredRate;
					m_timerfd = createTimerFd(m_desiredRate); // interval to be passed is in usecs
					if (wakeup)
					{
						// Wakup from on demand polling
						m_pollCV.notify_all();
					}
				}
				else if (pollType.compare("Interval") == 0 && m_pollType != POLL_INTERVAL)
				{
					// Change to interval mode without the rate changing
					m_pollType = POLL_INTERVAL;
					if (wakeup)
					{
						// Wakup from on demand polling
						m_pollCV.notify_all();
					}
				}
				else if (pollType.compare("On Demand") == 0)
				{
					m_pollType = POLL_ON_DEMAND;
				}
			} catch (ConfigItemNotFound& e) {
				logger->error("Failed to update poll interval following configuration change");
			}
		}
		unsigned long threshold = 5000;	// This should never be used
		if (m_configAdvanced.itemExists("bufferThreshold"))
		{
			threshold = (unsigned int)strtol(m_configAdvanced.getValue("bufferThreshold").c_str(), NULL, 10);
			m_ingest->setThreshold(threshold);
		}
		if (m_configAdvanced.itemExists("maxSendLatency"))
		{
			m_ingest->setTimeout(strtol(m_configAdvanced.getValue("maxSendLatency").c_str(), NULL, 10));
		}
		if (m_configAdvanced.itemExists("logLevel"))
		{
			string prevLogLevel = logger->getMinLevel();
			logger->setMinLevel(m_configAdvanced.getValue("logLevel"));
			PluginManager *manager = PluginManager::getInstance();
			PLUGIN_TYPE type = manager->getPluginImplType(southPlugin->getHandle());
			logger->debug("%s:%d: South plugin type = %s", __FUNCTION__, __LINE__, (type==PYTHON_PLUGIN)?"PYTHON_PLUGIN":"BINARY_PLUGIN");
			if (type == PYTHON_PLUGIN)
			{
				// propagate loglevel changes to python filters/plugins, if present
				logger->debug("prevLogLevel=%s, m_configAdvanced.getValue(\"logLevel\")=%s",
						prevLogLevel.c_str(), m_configAdvanced.getValue("logLevel").c_str());
				if (prevLogLevel.compare(m_configAdvanced.getValue("logLevel")) != 0)
				{
					logger->debug("%s:%d: calling southPlugin->reconfigure() for updating loglevel", __FUNCTION__, __LINE__);
					southPlugin->reconfigure("logLevel");
				}
			}
		}
		if (m_configAdvanced.itemExists("throttle"))
		{
			string throt = m_configAdvanced.getValue("throttle");
			// Treat any value starting with 't' or 'T' as true
			if (throt[0] == 't' || throt[0] == 'T')
			{
				m_throttle = true;
				m_highWater = threshold + (((float)threshold * SOUTH_THROTTLE_HIGH_PERCENT) / 100.0);
				m_lowWater = threshold + (((float)threshold * SOUTH_THROTTLE_LOW_PERCENT) / 100.0);
				logger->info("Throttling is enabled, high water mark is set to %ld", m_highWater);
			}
			else
			{
				m_throttle = false;
			}
		}
		if (m_configAdvanced.itemExists("assetTrackerInterval"))
		{
			string interval = m_configAdvanced.getValue("assetTrackerInterval");
			unsigned long i = strtoul(interval.c_str(), NULL, 10);
			if (m_assetTracker)
				m_assetTracker->tune(i);
		}
	}

	// Update the Security category
	if (categoryName.compare(m_name+"Security") == 0)
	{
		this->updateSecurityCategory(category);
	}

	// Deal with changes to the features settings
	if (categoryName.compare("FEATURES") == 0)
	{
		this->updateFeatures(ConfigCategory("FEATURES", category));
	}

	if(categoryName.compare(RESOURCE_LIMIT_CATEGORY) == 0)
	{
		m_configResourceLimit = ConfigCategory(RESOURCE_LIMIT_CATEGORY, category);
		getResourceLimit();
		m_ingest->setResourceLimit(m_serviceBufferingType, m_serviceBufferSize, m_discardPolicy);
	}
}

/**
 * Separate thread to run plugin_reconf, to avoid blocking
 * service's management interface due to long plugin_poll calls
 *
 * @param arg	The SouthService instance whose reconf queue is serviced
 */
static void reconfThreadMain(void *arg)
{
	SouthService *ss = (SouthService *)arg;
	Logger::getLogger()->info("reconfThreadMain(): Spawned new thread for plugin reconf");
	ss->handlePendingReconf();
	Logger::getLogger()->info("reconfThreadMain(): plugin reconf thread exiting");
}

/**
 * Handle configuration change notification; called by reconf thread
 * Waits for some reconf operation(s) to get queued up, then works thru' them
 */
void SouthService::handlePendingReconf()
{
	while (isRunning())
	{
		Logger::getLogger()->debug("SouthService::handlePendingReconf: Going into cv wait");
		// NOTE(review): the condition variable is waited on with a freshly
		// created local mutex and no predicate — confirm a notify that
		// arrives before this wait cannot be lost
		mutex mtx;
		unique_lock<mutex> lck(mtx);
		m_cvNewReconf.wait(lck);
		Logger::getLogger()->debug("SouthService::handlePendingReconf: cv wait has completed; some reconf request(s) has/have been queued up");
		unsigned int
numPendingReconfs = 0;
		{
			lock_guard<mutex> guard(m_pendingNewConfigMutex);
			numPendingReconfs = m_pendingNewConfig.size();
		}
		// Drain the queue of pending reconfigure requests
		while (isRunning() && numPendingReconfs)
		{
			std::pair<std::string,std::string> reconfValue;
			{
				// NOTE(review): front()/pop_front() are performed without
				// holding m_pendingNewConfigMutex — confirm this cannot race
				// with configChange() appending on another thread
				reconfValue = m_pendingNewConfig.front();
				m_pendingNewConfig.pop_front();
			}
			{
				string categoryName = reconfValue.first;
				string category = reconfValue.second;
				logger->info("Handle config change %s, %s", categoryName.c_str(), category.c_str());
				processConfigChange(categoryName, category);
			}
			{
				lock_guard<mutex> guard(m_pendingNewConfigMutex);
				numPendingReconfs = m_pendingNewConfig.size();
			}
		}
	}
}

/**
 * Configuration change notification using a separate thread
 *
 * Queues the change and wakes the reconf thread that applies it.
 *
 * @param categoryName	Category name
 * @param category	Category value
 */
void SouthService::configChange(const string& categoryName, const string& category)
{
	{
		lock_guard<mutex> guard(m_pendingNewConfigMutex);
		m_pendingNewConfig.emplace_back(std::make_pair(categoryName, category));
		Logger::getLogger()->debug("SouthService::reconfigure(): After adding new entry, m_pendingNewConfig.size()=%d", m_pendingNewConfig.size());
		m_cvNewReconf.notify_all();
	}
}

/**
 * Add the generic south service configuration options to the advanced
 * category
 *
 * @param defaultConfig	The default configuration from the plugin
 */
void SouthService::addConfigDefaults(DefaultConfigCategory& defaultConfig)
{
	bool isAsync = southPlugin->isAsync();
	for (int i = 0; defaults[i].name; i++)
	{
		// Async plugins have no poll rate, so suppress readingsPerSec for them
		if (strcmp(defaults[i].name, "readingsPerSec") == 0 && isAsync)
		{
			continue;
		}
		defaultConfig.addItem(defaults[i].name, defaults[i].description, defaults[i].type, defaults[i].value, defaults[i].value);
		defaultConfig.setItemDisplayName(defaults[i].name, defaults[i].displayName);
		if (!strcmp(defaults[i].name, "readingsPerSec"))
		{
			defaultConfig.setItemAttribute(defaults[i].name, ConfigCategory::MINIMUM_ATTR, "1");
		}
	}

	defaultConfig.setItemAttribute("maxSendLatency", ConfigCategory::MAXIMUM_ATTR, to_string(MAXSENDLATENCY));
	defaultConfig.setItemAttribute("maxSendLatency", ConfigCategory::MINIMUM_ATTR, "0");

	if (!isAsync)
	{
		/* Add the reading rate units */
		vector<string> rateUnits = { "second", "minute", "hour" };
		defaultConfig.addItem("units", "Reading Rate Per", "second", "second", rateUnits);
		defaultConfig.setItemDisplayName("units", "Reading Rate Per");

		/* Now add the fixed time polling option */
		vector<string> pollOptions = { "Interval", "Fixed Times", "On Demand" };
		defaultConfig.addItem("pollType", "Either poll at fixed intervals, at fixed times or when trigger by a poll control operation.", "Interval", "Interval", pollOptions);
		defaultConfig.setItemDisplayName("pollType", "Poll Type");

		/* Add the validity for interval polling items */
		defaultConfig.setItemAttribute("readingsPerSec", ConfigCategory::VALIDITY_ATTR, "pollType == \"Interval\"");
		defaultConfig.setItemAttribute("units", ConfigCategory::VALIDITY_ATTR, "pollType == \"Interval\"");
		defaultConfig.setItemAttribute("throttle", ConfigCategory::VALIDITY_ATTR, "pollType == \"Interval\"");

		/* Add the three time specifiers */
		defaultConfig.addItem("pollHours", "List of hours on which to poll or leave empty for all hours", "string", "", "");
		defaultConfig.setItemDisplayName("pollHours", "Hours");
		defaultConfig.setItemAttribute("pollHours", ConfigCategory::VALIDITY_ATTR, "pollType == \"Fixed Times\"");
		defaultConfig.addItem("pollMinutes", "List of minutes on which to poll or leave empty for all minutes", "string", "", "");
		defaultConfig.setItemDisplayName("pollMinutes", "Minutes");
		defaultConfig.setItemAttribute("pollMinutes", ConfigCategory::VALIDITY_ATTR, "pollType == \"Fixed Times\"");
		// NOTE(review): default "0,15,30,45" vs value "0,15,30,40" differ —
		// confirm whether "40" is intentional or a typo for "45"
		defaultConfig.addItem("pollSeconds", "Seconds on which to poll expressed as a comma seperated list", "string", "0,15,30,45", "0,15,30,40");
		defaultConfig.setItemDisplayName("pollSeconds", "Seconds");
		defaultConfig.setItemAttribute("pollSeconds", ConfigCategory::VALIDITY_ATTR, "pollType == \"Fixed Times\"");
	}
	if (southPlugin->hasControl())
	{
		defaultConfig.addItem("control", "Allow write and control operations on the device", "boolean", "true", "true");
		defaultConfig.setItemDisplayName("control", "Allow Control");
	}

	/* Add the set of logging levels to the service */
	vector<string> logLevels = { "error", "warning", "info", "debug" };
	defaultConfig.addItem("logLevel", "Minimum logging level reported", "warning", "warning", logLevels);
	defaultConfig.setItemDisplayName("logLevel", "Minimum Log Level");

	/* Add the statistics collection options to the service */
	vector<string> statistics = { "per asset", "per service", "per asset & service" };
	defaultConfig.addItem("statistics", "Collect statistics either for every asset ingested, for the service in total or both", "per asset & service", "per asset & service", statistics);
	defaultConfig.setItemDisplayName("statistics", "Statistics Collection");

	defaultConfig.addItem("perfmon", "Track and store performance counters", "boolean", "false", "false");
	defaultConfig.setItemDisplayName("perfmon", "Performance Counters");

	// Rate Monitoring options
	defaultConfig.addItem("rateMonitoringInterval", "The interval in minutes to use when calculating average ingestion rates for monitoring the service ingestion", "integer", "1", "1");
	defaultConfig.setItemDisplayName("rateMonitoringInterval", "Monitoring Period");
	defaultConfig.setItemAttribute("rateMonitoringInterval", ConfigCategory::MINIMUM_ATTR, "0");
	defaultConfig.addItem("rateSigmaFactor", "The sensitivity of the ingest rate monitor, expressed as a number of standard deviations of the average ingest rate.", "integer", "3", "3");
	defaultConfig.setItemDisplayName("rateSigmaFactor", "Monitoring Sensitivity");
	defaultConfig.setItemAttribute("rateSigmaFactor", ConfigCategory::MINIMUM_ATTR, "1");
}

/**
 * Create a timer FD on which a read would return data every time the given
 * interval elapses
 *
 * @param rate	The repeat interval, as a timeval, after which data becomes
 *		available on the returned timer FD
 * @return The timer file descriptor, or -1 on failure
 */
int SouthService::createTimerFd(struct timeval rate)
{
	int fd = -1;
	struct itimerspec new_value;
	struct timespec now;

	if (clock_gettime(CLOCK_REALTIME, &now) == -1)
		Logger::getLogger()->error("clock_gettime");

	// First expiry is 'rate' from now (absolute time)
	new_value.it_value.tv_sec = now.tv_sec + rate.tv_sec;
	new_value.it_value.tv_nsec = now.tv_nsec + rate.tv_usec*1000;
	if (new_value.it_value.tv_nsec >= 1000000000)
	{
		// Carry nanosecond overflow into seconds
		new_value.it_value.tv_sec += new_value.it_value.tv_nsec/1000000000;
		new_value.it_value.tv_nsec %= 1000000000;
	}

	// Then repeat every 'rate'
	new_value.it_interval.tv_sec = rate.tv_sec;
	new_value.it_interval.tv_nsec = rate.tv_usec*1000;
	if (new_value.it_interval.tv_nsec >= 1000000000)
	{
		// Carry nanosecond overflow into seconds
		new_value.it_interval.tv_sec += new_value.it_interval.tv_nsec/1000000000;
		new_value.it_interval.tv_nsec %= 1000000000;
	}

	errno=0;
	fd = timerfd_create(CLOCK_REALTIME, 0);
	if (fd == -1)
	{
		Logger::getLogger()->error("timerfd_create failed, errno=%d (%s)", errno, strerror(errno));
		return fd;
	}

	if (timerfd_settime(fd, TFD_TIMER_ABSTIME, &new_value, NULL) == -1)
	{
		Logger::getLogger()->error("timerfd_settime failed, errno=%d (%s)", errno, strerror(errno));
		close (fd);
		return -1;
	}

	return fd;
}

/**
 * If enabled, control the throttling of the poll rate in order to keep
 * the buffer usage of the service within check.
 *
 * Although this is written as if rate is being control, which it
 * logically is, the actual values are poll intervals. Hence reducing
 * the poll rate increases the value of m_currentRate.
*/ void SouthService::throttlePoll() { struct timeval now, res; if (!m_throttle) { return; } double desired = m_desiredRate.tv_sec + ((double)m_desiredRate.tv_usec / 1000000); desired *= m_repeatCnt; gettimeofday(&now, NULL); timersub(&now, &m_lastThrottle, &res); if (m_ingest->queueLength() > m_highWater && res.tv_sec > SOUTH_THROTTLE_DOWN_INTERVAL) { double rate = m_currentRate.tv_sec + ((double)m_currentRate.tv_usec / 1000000); rate *= (1.0 + ((double)SOUTH_THROTTLE_PERCENT / 100.0)); if (rate > MAX_SLEEP * 1000000) { double x = rate / (MAX_SLEEP * 1000000); m_repeatCnt = ceil(x); rate /= m_repeatCnt; } else { m_repeatCnt = 1; } m_currentRate.tv_sec = (long)rate; m_currentRate.tv_usec = (rate - m_currentRate.tv_sec) * 1000000; close(m_timerfd); m_timerfd = createTimerFd(m_currentRate); // interval to be passed is in usecs m_lastThrottle = now; m_throttled = true; logger->warn("%s Throttled down poll, rate is now %.1f%% of desired rate", m_name.c_str(), (desired * 100) / rate); m_perfMonitor->collect("throttled rate", (long)(rate * 1000)); } else if (m_throttled && m_ingest->queueLength() < m_lowWater && res.tv_sec > SOUTH_THROTTLE_UP_INTERVAL) { // We are currently throttled back but the queue is below the low water mark timersub(&m_desiredRate, &m_currentRate, &res); if (res.tv_sec != 0 || res.tv_usec != 0) { double rate = m_currentRate.tv_sec + ((double)m_currentRate.tv_usec / 1000000); rate *= (1.0 - ((double)SOUTH_THROTTLE_PERCENT / 100.0)); if (rate > MAX_SLEEP * 1000000) { double x = rate / (MAX_SLEEP * 1000000); m_repeatCnt = ceil(x); rate /= m_repeatCnt; } else { m_repeatCnt = 1; } m_currentRate.tv_sec = (long)rate; m_currentRate.tv_usec = (rate - m_currentRate.tv_sec) * 1000000; if (m_currentRate.tv_sec <= m_desiredRate.tv_sec && m_currentRate.tv_usec < m_desiredRate.tv_usec) { m_currentRate = m_desiredRate; m_throttled = false; logger->warn("%s Poll rate returned to configured value", m_name.c_str()); } else { logger->warn("%s Throttled up poll, rate 
is now %.1f%% of desired rate", m_name.c_str(), (desired * 100) / rate); } m_perfMonitor->collect("throttled rate", (long)(rate * 1000)); close(m_timerfd); m_timerfd = createTimerFd(m_currentRate); // interval to be passed is in usecs m_lastThrottle = now; } } } /** * Perform a setPoint operation on the south plugin * * @param name Name of the point to set * @param value The value to set * @return Success or failure of the SetPoint operation */ bool SouthService::setPoint(const string& name, const string& value) { if (southPlugin->hasControl()) { return southPlugin->write(name, value); } else { logger->warn("SetPoint operation %s = %s attempted on plugin that does not support control", name.c_str(), value.c_str()); return false; } } /** * Perform an operation on the south plugin * * @param name Name of the operation * @param params The parameters for the operaiton, if any * @return Success or failure of the operation */ bool SouthService::operation(const string& operation, vector<PLUGIN_PARAMETER *>& params) { if (operation.compare("poll") == 0) { if (m_pollType == POLL_ON_DEMAND) { m_doPoll = true; m_pollCV.notify_all(); return true; } else { logger->warn("Received a poll request for a service that is not enabled for on demand polling"); return false; } } else if (southPlugin->hasControl()) { return southPlugin->operation(operation, params); } else { logger->warn("Operation %s attempted on plugin that does not support control", operation.c_str()); return false; } } /** * Process a list of numbers into a vector of integers. * The list of numbers is obtained from a configuration * item. 
* * @param category The configuration category * @param item Name of the configuration item * @param list The vector to populate */ void SouthService::processNumberList(const ConfigCategory& category, const string& item, vector<unsigned long>& list) { list.clear(); if (!category.itemExists(item)) { Logger::getLogger()->warn("Item %s does not exist", item.c_str()); return; } string value = category.getValue(item); if (value.length() == 0) { Logger::getLogger()->info("Item %s is empty", item.c_str()); return; } const char *ptr = value.c_str(); char *eptr; while (*ptr) { list.push_back(strtoul(ptr, &eptr, 10)); ptr = eptr; if (*ptr == ',') ptr++; } } /** * Calcuate the rate at which the timer should trigger and the repeat * requirement needed to match the requested poll rate */ void SouthService::calculateTimerRate() { string pollType = m_configAdvanced.getValue("pollType"); if (pollType.compare("Fixed Times") == 0) { if (m_pollType == POLL_ON_DEMAND) { lock_guard<mutex> lk(m_pollMutex); m_pollType = POLL_FIXED; m_pollCV.notify_all(); } m_pollType = POLL_FIXED; processNumberList(m_configAdvanced, "pollHours", m_hours); processNumberList(m_configAdvanced, "pollMinutes", m_minutes); processNumberList(m_configAdvanced, "pollSeconds", m_seconds); if (m_minutes.size() == 0 && m_hours.size() != 0) m_minutes.push_back(0); if (m_seconds.size() == 0 && m_minutes.size() != 0) m_seconds.push_back(0); m_desiredRate.tv_sec = 1; m_desiredRate.tv_usec = 0; } else if (pollType.compare("On Demand") == 0) { m_pollType = POLL_ON_DEMAND; } else { if (m_pollType == POLL_ON_DEMAND) { lock_guard<mutex> lk(m_pollMutex); m_pollType = POLL_INTERVAL; m_pollCV.notify_all(); } m_pollType = POLL_INTERVAL; string units = m_configAdvanced.getValue("units"); unsigned long dividend = 1000000; if (units.compare("second") == 0) dividend = 1000000; else if (units.compare("minute") == 0) dividend = 60000000; else if (units.compare("hour") == 0) dividend = 3600000000; m_rateUnits = units; unsigned long 
usecs = dividend / m_readingsPerSec; if (usecs > MAX_SLEEP * 1000000) { double x = usecs / (MAX_SLEEP * 1000000); m_repeatCnt = ceil(x); usecs /= m_repeatCnt; } else { m_repeatCnt = 1; } m_desiredRate.tv_sec = (int)(usecs / 1000000); m_desiredRate.tv_usec = (int)(usecs % 1000000); } } /** * Find the next fixed time poll time and wait for that time before returning. * This method will also return if m_shutdown is set. * * @return bool True if the return is doe to a poll being required. */ bool SouthService::syncToNextPoll() { time_t tim = time(0); struct tm tm; localtime_r(&tim, &tm); unsigned long waitFor = 1; if (m_hours.size() == 0 && m_minutes.size() == 0 && m_seconds.size() == 0) { Logger::getLogger()->error("Poll time misconfigured."); } else if (m_hours.size() == 0 && m_minutes.size() == 0) { // Only looking at seconds unsigned int i; for (i = 0; i < m_seconds.size() && m_seconds[i] <= (unsigned)tm.tm_sec; i++) { } if (i == m_seconds.size()) { waitFor = (60 - (unsigned)tm.tm_sec) + m_seconds[0]; } else { waitFor = m_seconds[i] - (unsigned)tm.tm_sec; } } else if (m_hours.size() == 0) { unsigned int target_min = (unsigned)tm.tm_min; unsigned int min, sec; for (min = 0; min < m_minutes.size() && m_minutes[min] < target_min; min++) { } if (min == m_minutes.size()) // Reset to start of minute list { min = 0; } if (m_minutes[min] != target_min) // Not this minute { sec = 0; // Always use first setting of seconds } else { for (sec = 0; sec < m_seconds.size() && m_seconds[sec] <= (unsigned)tm.tm_sec; sec++) { } if (sec == m_seconds.size()) { // Too late in this minute use next minute setting sec = 0; min++; if (min >= m_minutes[min]) { min = 0; } } } waitFor = 0; if (m_minutes[min] > (unsigned)tm.tm_min) { waitFor = 60 * (m_minutes[min] - (unsigned)tm.tm_min); } else if (m_minutes[min] < (unsigned)tm.tm_min) { waitFor = 60 * ((60 - (unsigned)tm.tm_min) + m_minutes[min]); } if (m_seconds[sec] > (unsigned)tm.tm_sec) { waitFor += ((unsigned)tm.tm_sec - m_seconds[sec]); 
} else { waitFor += ((60 - (unsigned)tm.tm_sec) + m_seconds[sec]); } } else // Hours, minutes and seconds { unsigned int hour, min, sec; for (hour = 0; hour < m_hours.size() && m_hours[hour] < (unsigned)tm.tm_hour; hour++) { } if (hour == m_hours.size()) // Reset to start of minute list { min = 0; sec = 0; hour = 0; } else if (m_hours[hour] == (unsigned)tm.tm_hour) // Check for this hour { for (min = 0; min < m_minutes.size() && m_minutes[min] < (unsigned)tm.tm_min; min++) { } if (min < m_minutes.size()) // may still be a trogger in this hor { for (sec = 0; sec < m_seconds.size() && m_seconds[sec] <= (unsigned)tm.tm_sec; sec++) { } if (sec == m_seconds.size()) { // Too late in this minute use next minute setting sec = 0; min++; if (min == m_minutes.size()) { min = 0; sec = 0; hour++; if (m_hours.size() == hour) hour = 0; } } } else { hour++; min = 0; sec = 0; if (m_hours.size() == hour) hour = 0; } } else { hour++; min = 0; sec = 0; if (m_hours.size() == hour) hour = 0; } waitFor = 0; if (m_hours[hour] > (unsigned)tm.tm_hour) { waitFor += 60 * 60 * (m_hours[hour] - (unsigned)tm.tm_hour); } else if (m_minutes[min] < (unsigned)tm.tm_min) { waitFor += 60 * 60 * ((24 - (unsigned)tm.tm_hour) + m_hours[hour]); } if (m_minutes[min] > (unsigned)tm.tm_min) { waitFor += 60 * (m_minutes[min] - (unsigned)tm.tm_min); } else if (m_minutes[min] < (unsigned)tm.tm_min) { waitFor += 60 * ((60 - (unsigned)tm.tm_min) + m_minutes[min]); } if (m_seconds[sec] > (unsigned)tm.tm_sec) { waitFor += ((unsigned)tm.tm_sec - m_seconds[sec]); } else { waitFor += ((60 - (unsigned)tm.tm_sec) + m_seconds[sec]); } } uint64_t exp; while (waitFor) { if (read(m_timerfd, &exp, sizeof(uint64_t)) == -1) return false; waitFor--; if (m_shutdown) return false; if (m_pollType != POLL_FIXED) // Configuration has change to the poll type { return false; } } return true; } /** * Wait until either a shutdown request is received or a poll operation * * @return bool True if the return is due to a new poll request */ 
bool SouthService::onDemandPoll() { unique_lock<mutex> lk(m_pollMutex); if (! m_shutdown) { m_doPoll = false; m_pollCV.wait(lk); } return m_doPoll; } /** * Check to see if there is a reconfiguration option blocking in another * thread and yield until that reconfiguration has occured. */ void SouthService::checkPendingReconfigure() { while(1) { unsigned int numPendingReconfs; { lock_guard<mutex> guard(m_pendingNewConfigMutex); numPendingReconfs = m_pendingNewConfig.size(); } // if a reconf is pending, make this poll thread yield CPU, sleep_for is needed to sleep this thread for sufficiently long time if (numPendingReconfs) { Logger::getLogger()->debug("SouthService::start(): %d entries in m_pendingNewConfig, poll thread yielding CPU", numPendingReconfs); std::this_thread::sleep_for(std::chrono::milliseconds(200)); } else return; } } /** * Process the setting of allowed features * * @param category The configuration category */ void SouthService::updateFeatures(const ConfigCategory& category) { if (category.itemExists("control")) { string s = category.getValue("control"); m_controlEnabled = s.compare("true") == 0 ? true : false; } if (category.itemExists("debugging")) { string s = category.getValue("debugging"); m_debuggerEnabled = s.compare("true") == 0 ? 
true : false; if ((m_debugState & DEBUG_ATTACHED) != 0 && m_debuggerEnabled == false) { // Detach the debugger detachDebugger(); } } } /** * Return the state of the pipeline debugger * * @return string JSON document reporting the state of the pipeline debugger */ string SouthService::debugState() { string rval; rval = "{ "; rval += "\"debugger\" : "; if (m_debugState & DEBUG_ATTACHED) { rval += "\"Attached\","; rval += "\"ingress\" : "; if (m_debugState & DEBUG_SUSPENDED) rval += "\"Suspended\", "; else rval += "\"Running\", "; rval += "\"egress\" : "; if (m_debugState & DEBUG_ISOLATED) rval += "\"Isolated\""; else rval += "\"Storage\""; } else if (allowDebugger()) { rval += "\"Detached\""; } else { rval += "\"Disabled\""; } rval += "}"; return rval; } ================================================ FILE: C/services/south/south_api.cpp ================================================ /** * Fledge south service API * * Copyright (c) 2021 Dianomic Systems * * Author: Mark Riddoch, Massimiliano Pinto */ #include <south_api.h> #include <south_service.h> #include <rapidjson/document.h> using namespace std; using namespace rapidjson; using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; static SouthApi *api = NULL; /** * Wrapper for the PUT setPoint API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void setPointWrapper(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { if (api) api->setPoint(response, request); } /** * Wrapper for the PUT operation API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void operationWrapper(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { if (api) api->operation(response, request); } /** * Wrapper for the PUT attach debugger API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void attachDebuggerWrapper(Response response, 
Request request) { if (api) api->attachDebugger(response, request); } /** * Wrapper for the PUT detach debugger API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void detachDebuggerWrapper(Response response, Request request) { if (api) api->detachDebugger(response, request); } /** * Wrapper for the PUT set debugger buffer size API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void setDebuggerBufferWrapper(Response response, Request request) { if (api) api->setDebuggerBuffer(response, request); } /** * Wrapper for the GET debugger buffer API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void getDebuggerBufferWrapper(Response response, Request request) { if (api) api->getDebuggerBuffer(response, request); } /** * Wrapper for the PUT debugger isolate API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void isolateDebuggerWrapper(Response response, Request request) { if (api) api->isolateDebugger(response, request); } /** * Wrapper for the PUT debugger suspend API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void suspendDebuggerWrapper(Response response, Request request) { if (api) api->suspendDebugger(response, request); } /** * Wrapper for the PUT step debugger API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void stepDebuggerWrapper(Response response, Request request) { if (api) api->stepDebugger(response, request); } /** * Wrapper for the PUT replay debugger API call * * @param response The HTTP Response to send * @param request The HTTP Request */ static void replayDebuggerWrapper(Response response, Request request) { if (api) api->replayDebugger(response, request); } /** * Wrapper for the GET state debugger API call * * @param response The HTTP Response to send * @param 
request The HTTP Request */ static void stateDebuggerWrapper(Response response, Request request) { if (api) api->stateDebugger(response, request); } /** * Wrapper for thread creation that is used to start the API */ static void startService() { api->startServer(); } /** * South API class constructor * * @param service The SouthService class this is the API for */ SouthApi::SouthApi(SouthService *service) : m_service(service), m_thread(NULL) { m_logger = Logger::getLogger(); m_server = new HttpServer(); m_server->config.port = 0; m_server->config.thread_pool_size = 1; // AuthenticationMiddleware for PUT regexp paths: use lambda funcion, passing the class object m_server->resource[SETPOINT]["PUT"] = [this](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { m_service->AuthenticationMiddlewarePUT(response, request, setPointWrapper); }; m_server->resource[OPERATION]["PUT"] = [this](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { m_service->AuthenticationMiddlewarePUT(response, request, operationWrapper); }; // Add the debugger entry points m_server->resource[DEBUG_ATTACH]["PUT"] = attachDebuggerWrapper; m_server->resource[DEBUG_DETACH]["PUT"] = detachDebuggerWrapper; m_server->resource[DEBUG_BUFFER]["POST"] = setDebuggerBufferWrapper; m_server->resource[DEBUG_BUFFER]["GET"] = getDebuggerBufferWrapper; m_server->resource[DEBUG_ISOLATE]["PUT"] = isolateDebuggerWrapper; m_server->resource[DEBUG_SUSPEND]["PUT"] = suspendDebuggerWrapper; m_server->resource[DEBUG_STEP]["PUT"] = stepDebuggerWrapper; m_server->resource[DEBUG_REPLAY]["PUT"] = replayDebuggerWrapper; m_server->resource[DEBUG_STATE]["GET"] = stateDebuggerWrapper; api = this; m_thread = new thread(startService); } /** * Destroy the API. * * Stop the service and wait fo rthe thread to terminate. 
*/ SouthApi::~SouthApi() { if (m_thread) { m_server->stop(); m_thread->join(); delete m_thread; } if (m_server) delete m_server; } /** * Called on the API service thread. Start the listener for HTTP requests */ void SouthApi::startServer() { m_server->start(); } /** * Return the port the service is listening on */ unsigned short SouthApi::getListenerPort() { int max_wait = 10; // Need to make sure the server thread has started while (m_server->getLocalPort() == 0 && max_wait-- > 0) usleep(100); return m_server->getLocalPort(); } /** * Implement the setPoint PUT request. Caues the write operation on * the south plugin to be called with each of the set point parameters * * @param response The HTTP response * @param request The HTTP request */ void SouthApi::setPoint(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { if (m_service->allowControl()) { string payload = request->content.string(); try { Document doc; ParseResult result = doc.Parse(payload.c_str()); if (result) { if (doc.HasMember("values") && doc["values"].IsObject()) { bool status = true; Value& values = doc["values"]; for (Value::ConstMemberIterator itr = values.MemberBegin(); itr != values.MemberEnd(); ++itr) { string name = itr->name.GetString(); if (itr->value.IsString()) { string value = itr->value.GetString(); if (!m_service->setPoint(name, value)) { status = false; } } } if (status) { string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "status" : "failed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } return; } else { string responsePayload = QUOTE({ "message" : "Missing 'values' object in payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); return; } } else { string responsePayload = QUOTE({ "message" : "Failed to parse request payload" }); 
m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } catch (exception &e) { char buffer[80]; snprintf(buffer, sizeof(buffer), "\"Exception: %s\"", e.what()); string responsePayload = QUOTE({ "message" : buffer }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Failed, control features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke an operation on the south plugin * * @param response The HTTP response * @param request The HTTP request */ void SouthApi::operation(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { if (m_service->allowControl()) { string payload = request->content.string(); try { Document doc; ParseResult result = doc.Parse(payload.c_str()); if (result) { string operation; if (doc.HasMember("operation") && doc["operation"].IsString()) { operation = doc["operation"].GetString(); vector<PLUGIN_PARAMETER *> parameters; if (doc.HasMember("parameters") && doc["parameters"].IsObject()) { Value& values = doc["parameters"]; for (Value::ConstMemberIterator itr = values.MemberBegin(); itr != values.MemberEnd(); ++itr) { string name = itr->name.GetString(); if (itr->value.IsString()) { string value = itr->value.GetString(); PLUGIN_PARAMETER *param = new PLUGIN_PARAMETER; param->name = name; param->value = value; parameters.push_back(param); } } } else if (doc.HasMember("parameters")) { string responsePayload = QUOTE({ "message" : "If present, parameters of an operation must be a JSON object" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); return; } bool status = m_service->operation(operation, parameters); for (auto param : parameters) delete param; if (status) { string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, 
responsePayload); } else { string responsePayload = QUOTE({ "status" : "plugin returned failed status for operation" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } return; } else { string responsePayload = QUOTE({ "message" : "Missing 'operation' in payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); return; } } else { string responsePayload = QUOTE({ "status" : "failed to parse operation payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); return; } } catch (exception &e) { } string responsePayload = QUOTE({ "status" : "failed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } else { string responsePayload = QUOTE({ "status" : "Failed, control features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke debugger attach on the south plugin * * @param response The HTTP response * @param request The HTTP request - unused */ void SouthApi::attachDebugger(Response response, Request /*request*/) { if (m_service->allowDebugger()) { bool status = m_service->attachDebugger(); if (status) { string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "status" : "Failed to attach the debugger to the pipeline. A pipeline must contain at least one filter in order to attach the debugger to the pipeline." 
}); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Failed, debugger features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke debugger detach on the south plugin * * @param response The HTTP response * @param request The HTTP request - unused */ void SouthApi::detachDebugger(Response response, Request /*request*/) { string responsePayload; if (m_service->allowDebugger()) { if (m_service->debuggerAttached()) { m_service->detachDebugger(); responsePayload = QUOTE({ "status" : "ok" }); } else { responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); } m_service->respond(response, responsePayload); } else { responsePayload = QUOTE({ "status" : "Failed, debugger features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke set debugger buffer size on the south plugin * * @param response The HTTP response * @param request The HTTP request */ void SouthApi::setDebuggerBuffer(Response response, Request request) { if (m_service->allowDebugger()) { if (m_service->debuggerAttached()) { string payload = request->content.string(); Document doc; ParseResult result = doc.Parse(payload.c_str()); if (result) { if (doc.HasMember("size")) { if (doc["size"].IsUint()) { unsigned int size = doc["size"].GetUint(); m_service->setDebuggerBuffer(size); string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "message" : "The value of 'size' should be an unsigned integer" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Missing 'size' item in payload" }); m_service->respond(response, 
SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Failed to parse request payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Failed, debugger features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke get debugger buffer size on the south plugin * * @param response The HTTP response * @param request The HTTP request - unused */ void SouthApi::getDebuggerBuffer(Response response, Request /*request*/) { if (m_service->allowDebugger()) { string result; if (m_service->debuggerAttached()) { result = m_service->getDebuggerBuffer(); } else { result = QUOTE({"status" : "Debugger is not attached to the service" }); } m_service->respond(response, result); } else { string responsePayload = QUOTE({ "status" : "Failed, debugger features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke isolate debugger handler on the south plugin * * @param response The HTTP response * @param request The HTTP request */ void SouthApi::isolateDebugger(Response response, Request request) { if (m_service->allowDebugger()) { if (m_service->debuggerAttached()) { string payload = request->content.string(); Document doc; ParseResult result = doc.Parse(payload.c_str()); if (result) { if (doc.HasMember("state")) { if (doc["state"].IsString()) { string state = doc["state"].GetString(); if (state.compare("discard") == 0) m_service->isolateDebugger(true); else if (state.compare("store") == 0) m_service->isolateDebugger(false); else { string responsePayload = QUOTE({ 
"message" : "The value of 'state' should be one of 'discard' or 'store'" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); return; } string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "message" : "The value of 'size' should be a string with either 'discard' or 'store'" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Missing 'state' item in payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Failed to parse request payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Failed, debugger features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke suspend debugger handler on the south plugin * * @param response The HTTP response * @param request The HTTP request */ void SouthApi::suspendDebugger(Response response, Request request) { if (m_service->allowDebugger()) { if (m_service->debuggerAttached()) { string payload = request->content.string(); Document doc; ParseResult result = doc.Parse(payload.c_str()); if (result) { if (doc.HasMember("state")) { if (doc["state"].IsString()) { string state = doc["state"].GetString(); if (state.compare("suspend") == 0) m_service->suspendDebugger(true); else if (state.compare("resume") == 0) m_service->suspendDebugger(false); else { string responsePayload = QUOTE({ "message" : "The value of 'state' 
should be one of 'suspend' or 'resume'" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); return; } string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "message" : "The value of 'size' should be a string with either 'suspend' or 'resume'" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Missing 'state' item in payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Failed to parse request payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Failed, debugger features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke set debugger step command on the south plugin * * @param response The HTTP response * @param request The HTTP request */ void SouthApi::stepDebugger(Response response, Request request) { if (m_service->allowDebugger()) { if (m_service->debuggerAttached()) { string payload = request->content.string(); Document doc; ParseResult result = doc.Parse(payload.c_str()); if (result) { if (doc.HasMember("steps")) { if (doc["steps"].IsUint()) { unsigned int steps = doc["steps"].GetUint(); m_service->stepDebugger(steps); string responsePayload = QUOTE({ "status" : "ok" }); m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "message" : "The value of 'steps' should be an unsigned integer" }); 
m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Missing 'steps' item in payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "message" : "Failed to parse request payload" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_bad_request,responsePayload); } } else { string responsePayload = QUOTE({ "status" : "Failed, debugger features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke debugger replay on the south plugin * * @param response The HTTP response * @param request The HTTP request - unused */ void SouthApi::replayDebugger(Response response, Request /*request*/) { if (m_service->allowDebugger()) { // TODO Handle pre-requisites string responsePayload; if (m_service->debuggerAttached()) { if (m_service->replayDebugger()) { responsePayload = QUOTE({ "status" : "ok" }); } else { responsePayload = QUOTE({ "status" : "No data to replay" }); } } else { responsePayload = QUOTE({"status" : "Debugger is not attached to the service" }); } m_service->respond(response, responsePayload); } else { string responsePayload = QUOTE({ "status" : "Failed, debugger features are not allowed" }); m_service->respond(response, SimpleWeb::StatusCode::client_error_forbidden,responsePayload); } } /** * Invoke debugger state on the south plugin * * @param response The HTTP response * @param request The HTTP request - unused */ void SouthApi::stateDebugger(Response response, Request /*request*/) { string payload = m_service->debugState(); m_service->respond(response, payload); } 
================================================ FILE: C/services/south/south_plugin.cpp ================================================ /* * Fledge south service. * * Copyright (c) 2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <south_plugin.h> #include <south_service.h> #include <config_category.h> #include <logger.h> #include <exception> #include <typeinfo> #include <stdexcept> #include <mutex> using namespace std; // mutex between various plugin methods, since reconfigure changes the handle // object itself and marks previous handle as garbage collectible by Python runtime std::mutex mtx2; /** * Constructor for the class that wraps the south plugin * * Create a set of function points that resolve to the loaded plugin and * enclose in the class. * */ SouthPlugin::SouthPlugin(PLUGIN_HANDLE handle, const ConfigCategory& category) : Plugin(handle) { m_started = false; // Set started indicator, overrided by async plugins only // Call the init method of the plugin PLUGIN_HANDLE (*pluginInit)(const void *) = (PLUGIN_HANDLE (*)(const void *)) manager->resolveSymbol(handle, "plugin_init"); instance = (*pluginInit)(&category); if (!instance) { Logger::getLogger()->error("plugin_init returned NULL, cannot proceed"); throw new exception(); } // Setup the function pointers to the plugin pluginStartPtr = (void (*)(PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_start"); const char *pluginInterfaceVer = manager->getInfo(handle)->interface; if (pluginInterfaceVer[0]=='1' && pluginInterfaceVer[1]=='.') { pluginPollPtr = (Reading (*)(PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_poll"); } else if (pluginInterfaceVer[0]=='2' && pluginInterfaceVer[1]=='.') { pluginPollPtrV2 = (std::vector<Reading*>* (*)(PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_poll"); } else { Logger::getLogger()->error("Invalid plugin interface version '%s', assuming version 1.x", pluginInterfaceVer); pluginPollPtr = (Reading 
(*)(PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_poll"); } pluginReconfigurePtr = (void (*)(PLUGIN_HANDLE*, const std::string&)) manager->resolveSymbol(handle, "plugin_reconfigure"); pluginShutdownPtr = (void (*)(PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_shutdown"); if (isAsync()) { if (pluginInterfaceVer[0]=='1' && pluginInterfaceVer[1]=='.') { pluginRegisterPtr = (void (*)(PLUGIN_HANDLE, INGEST_CB cb, void *data)) manager->resolveSymbol(handle, "plugin_register_ingest"); } else if (pluginInterfaceVer[0]=='2' && pluginInterfaceVer[1]=='.') { pluginRegisterPtrV2 = (void (*)(PLUGIN_HANDLE, INGEST_CB2 cb, void *data)) manager->resolveSymbol(handle, "plugin_register_ingest"); } else { Logger::getLogger()->error("Invalid plugin interface version '%s', assuming version 1.x", pluginInterfaceVer); pluginRegisterPtr = (void (*)(PLUGIN_HANDLE, INGEST_CB cb, void *data)) manager->resolveSymbol(handle, "plugin_register_ingest"); } } pluginShutdownDataPtr = (string (*)(const PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_shutdown"); pluginStartDataPtr = (void (*)(const PLUGIN_HANDLE, const string& storedData)) manager->resolveSymbol(handle, "plugin_start"); pluginWritePtr = NULL; pluginOperationPtr = NULL; if (hasControl()) { pluginWritePtr = (bool (*)(const PLUGIN_HANDLE, const std::string&, const std::string&)) manager->resolveSymbol(handle, "plugin_write"); pluginOperationPtr = (bool (*)(const PLUGIN_HANDLE, const std::string&, int, PLUGIN_PARAMETER **)) manager->resolveSymbol(handle, "plugin_operation"); } } /** * South plugin destructor */ SouthPlugin::~SouthPlugin() { } /** * Call the start method in the plugin */ void SouthPlugin::start() { lock_guard<mutex> guard(mtx2); try { this->pluginStartPtr(instance); m_started = true; // Set start indicator return; } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in south plugin start(), %s", e.what()); throw; } catch (...) 
{ std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in south plugin start(), %s", p ? p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the start method in the plugin */ void SouthPlugin::startData(const string& data) { lock_guard<mutex> guard(mtx2); try { this->pluginStartDataPtr(instance, data); m_started = true; // Set start indicator return; } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in south plugin start(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in south plugin start(), %s", p ? p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the poll method in the plugin */ Reading SouthPlugin::poll() { lock_guard<mutex> guard(mtx2); try { return this->pluginPollPtr(instance); } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in south plugin poll(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in south plugin poll(), %s", p ? p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the poll method in the plugin supporting interface ver 2.x */ ReadingSet* SouthPlugin::pollV2() { lock_guard<mutex> guard(mtx2); try { std::vector<Reading *> *vec = this->pluginPollPtrV2(instance); if(vec) { ReadingSet *set = new ReadingSet(vec); vec->clear(); delete vec; return set; // this->pluginPollPtrV2(instance); } else return NULL; } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in v2 south plugin poll(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in v2 south plugin poll(), %s", p ? 
p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the reconfigure method in the plugin */ void SouthPlugin::reconfigure(const string& newConfig) { lock_guard<mutex> guard(mtx2); try { this->pluginReconfigurePtr(&instance, newConfig); if (!instance) { Logger::getLogger()->error("plugin_reconfigure returned NULL, cannot proceed"); throw new exception(); } return; } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in south plugin reconfigure(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in south plugin reconfigure(), %s", p ? p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the shutdown method in the plugin */ void SouthPlugin::shutdown() { lock_guard<mutex> guard(mtx2); try { return this->pluginShutdownPtr(instance); } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in south plugin shutdown(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in south plugin shutdown(), %s", p ? p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the shutdown method in the plugin */ string SouthPlugin::shutdownSaveData() { lock_guard<mutex> guard(mtx2); try { return this->pluginShutdownDataPtr(instance); } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in south plugin shutdown(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in south plugin shutdown(), %s", p ? 
p.__cxa_exception_type()->name() : "unknown exception"); throw; } } void SouthPlugin::registerIngest(INGEST_CB cb, void *data) { lock_guard<mutex> guard(mtx2); try { return this->pluginRegisterPtr(instance, cb, data); } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in south plugin registerIngest(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in south plugin registerIngest(), %s", p ? p.__cxa_exception_type()->name() : "unknown exception"); throw; } } void SouthPlugin::registerIngestV2(INGEST_CB2 cb, void *data) { lock_guard<mutex> guard(mtx2); try { return this->pluginRegisterPtrV2(instance, cb, data); } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception raised in south plugin registerIngestV2(), %s", e.what()); throw; } catch (...) { std::exception_ptr p = std::current_exception(); Logger::getLogger()->fatal("Unhandled exception raised in south plugin registerIngestV2(), %s", p ? 
p.__cxa_exception_type()->name() : "unknown exception"); throw; } } /** * Call the write entry point of the plugin * * @param name The name of the parameter to change * @param value The value to set the parameter */ bool SouthPlugin::write(const string& name, const string& value) { try { if (pluginWritePtr) { bool run = true; // Check plugin_start is done for async plugin before calling pluginWritePtr if (isAsync()) { int tries = 0; while (!m_started) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); Logger::getLogger()->debug("South plugin write call is on hold, try %d", tries); if (tries > 20) { break; } tries++; } run = m_started; } if (run) { return this->pluginWritePtr(instance, name, value); } else { Logger::getLogger()->error("South plugin write canceled after waiting for 2 seconds"); return false; } } } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception in plugin write operation: %s", e.what()); } return false; } /** * Call the plugin operation entry point with the operation to execute * * @param name The name of the operation * @param parameters The paramters for the operation. * @return bool Status of the operation */ bool SouthPlugin::operation(const string& name, vector<PLUGIN_PARAMETER *>& parameters) { bool status = false; if (! 
this->pluginOperationPtr) { Logger::getLogger()->error( "Attempt to invoke an operation '%s' on a plugin that does not provide operation entry point", name.c_str()); return status; } unsigned int count = parameters.size(); PLUGIN_PARAMETER **params = (PLUGIN_PARAMETER **)malloc(sizeof(PLUGIN_PARAMETER *) * (count + 1)); if (params == NULL) { Logger::getLogger()->fatal("Unable to allocate parameters, out of memory"); return status; } for (unsigned int i = 0; i < parameters.size(); i++) { params[i] = parameters[i]; } params[count] = NULL; try { bool run = true; // Check plugin_start is done for async plugin before calling pluginOperationPtr if (isAsync()) { int tries = 0; while (!m_started) { std::this_thread::sleep_for(std::chrono::milliseconds(100)); Logger::getLogger()->debug("South plugin operation is on hold, try %d", tries); if (tries > 20) { break; } tries++; } run = m_started; } if (run) { status = this->pluginOperationPtr(instance, name, (int)count, params); } else { Logger::getLogger()->error("South plugin operation canceled after waiting for 2 seconds"); return false; } } catch (exception& e) { Logger::getLogger()->fatal("Unhandled exception in plugin operation: %s", e.what()); } free(params); return status; } ================================================ FILE: C/services/south-plugin-interfaces/python/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(south-plugin-python-interface) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(DLLIB -ldl) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) # Find source files file(GLOB SOURCES python_plugin_interface.cpp) # Find Python.h 3.x dev/lib package if(${CMAKE_VERSION} VERSION_LESS "3.12.0") pkg_check_modules(PYTHON REQUIRED python3) else() find_package(Python3 REQUIRED COMPONENTS Interpreter Development) endif() # Include header files include_directories(include 
../../../common/include ../../../services/common/include ../../../services/south/include ../../../thirdparty/rapidjson/include) include_directories(../../../services/common-plugin-interfaces/python/include) # Add Python 3.x header files if(${CMAKE_VERSION} VERSION_LESS "3.12.0") include_directories(${PYTHON_INCLUDE_DIRS}) else() include_directories(${Python3_INCLUDE_DIRS}) endif() if(${CMAKE_VERSION} VERSION_LESS "3.12.0") link_directories(${PYTHON_LIBRARY_DIRS}) else() link_directories(${Python3_LIBRARY_DIRS}) endif() set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../../lib) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) target_link_libraries(${PROJECT_NAME} ${DLLIB}) target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${SERVICE_COMMON_LIB}) # Add Python 3.x library if(${CMAKE_VERSION} VERSION_LESS "3.12.0") target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES}) else() target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES}) endif() set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) # Install library install(TARGETS ${PROJECT_NAME} DESTINATION fledge/lib) ================================================ FILE: C/services/south-plugin-interfaces/python/async_ingest_pymodule/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 2.6.0) project(async_ingest) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") set(DLLIB -ldl) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) # Find source files file(GLOB SOURCES ingest_callback_pymodule.cpp) # Find Python 3.5 or higher dev/lib/interp package #find_package(PythonInterp 3.5 REQUIRED) if(${CMAKE_VERSION} VERSION_LESS "3.12.0") pkg_check_modules(PYTHON REQUIRED python3) else() find_package(Python3 REQUIRED COMPONENTS Interpreter Development) endif() # Include header files include_directories(include ../../../../common/include 
../../../../services/common/include ../../../../services/south/include ../../../../thirdparty/rapidjson/include) # Add Python 3.x header files if(${CMAKE_VERSION} VERSION_LESS "3.12.0") include_directories(${PYTHON_INCLUDE_DIRS}) else() include_directories(${Python3_INCLUDE_DIRS}) endif() if(${CMAKE_VERSION} VERSION_LESS "3.12.0") link_directories(${PYTHON_LIBRARY_DIRS}) else() link_directories(${Python3_LIBRARY_DIRS}) endif() link_directories(${PROJECT_BINARY_DIR}/../../../../lib) set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${PROJECT_BINARY_DIR}/../../../../../../python) # Create shared library add_library(${PROJECT_NAME} SHARED ${SOURCES}) # Add Python 3.x library if(${CMAKE_VERSION} VERSION_LESS "3.12.0") target_link_libraries(${PROJECT_NAME} ${PYTHON_LIBRARIES}) else() target_link_libraries(${PROJECT_NAME} ${Python3_LIBRARIES}) endif() target_link_libraries(${PROJECT_NAME} ${DLLIB}) target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${SERVICE_COMMON_LIB}) set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE C) set_target_properties(${PROJECT_NAME} PROPERTIES SOVERSION 1) set_target_properties(${PROJECT_NAME} PROPERTIES VERSION 1) set_target_properties(${PROJECT_NAME} PROPERTIES PREFIX "") # Install libraries install(TARGETS ${PROJECT_NAME} DESTINATION fledge/python) ================================================ FILE: C/services/south-plugin-interfaces/python/async_ingest_pymodule/ingest_callback_pymodule.cpp ================================================ /* * Fledge python module for async plugin ingest callback * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <reading.h> #include <logger.h> #include <Python.h> #include <vector> #include <pythonreadingset.h> extern "C" { typedef void (*INGEST_CB2)(void *, PythonReadingSet *); void plugin_ingest_fn(PyObject *ingest_callback, PyObject *ingest_obj_ref_data, PyObject *readingsObj); static 
PyObject *IngestError; static PyObject * ingest_callback(PyObject *self, PyObject *args) { PyObject *readingList; PyObject *callback; PyObject *ingestData; if (!PyArg_ParseTuple(args, "OOO", &callback, &ingestData, &readingList)) return NULL; plugin_ingest_fn(callback, ingestData, readingList); Py_INCREF(Py_None); return Py_None; } static PyMethodDef IngestMethods[] = { {"ingest_callback", ingest_callback, METH_VARARGS, "Invoke ingest callback"}, {NULL, NULL, 0, NULL} /* Sentinel */ }; static struct PyModuleDef ingestmodule = { PyModuleDef_HEAD_INIT, "async_ingest", /* name of module */ NULL, /* module documentation, may be NULL */ -1, /* size of per-interpreter state of the module, or -1 if the module keeps state in global variables. */ IngestMethods }; PyMODINIT_FUNC PyInit_async_ingest(void) { PyObject *m; m = PyModule_Create(&ingestmodule); if (m == NULL) return NULL; Logger::getLogger()->debug("PyModule_Create() succeeded"); IngestError = PyErr_NewException("ingest.error", NULL, NULL); Py_INCREF(IngestError); PyModule_AddObject(m, "error", IngestError); Logger::getLogger()->debug("PyInit_ingest() returning"); return m; } void plugin_ingest_fn(PyObject *ingest_callback, PyObject *ingest_obj_ref_data, PyObject *readingsObj) { if (ingest_callback == NULL || ingest_obj_ref_data == NULL || readingsObj == NULL) { Logger::getLogger()->error("Py2C interface: plugin_ingest_fn: ingest_callback=%p, ingest_obj_ref_data=%p, readingsObj=%p", ingest_callback, ingest_obj_ref_data, readingsObj); return; } PythonReadingSet *pyReadingSet = NULL; try { pyReadingSet = new PythonReadingSet(readingsObj); } catch (std::exception e) { Logger::getLogger()->warn("PythonReadingSet c'tor failed, error: %s", e.what()); pyReadingSet = NULL; } // Py_XDECREF(readingsObj); if(pyReadingSet) { INGEST_CB2 cb = (INGEST_CB2) PyCapsule_GetPointer(ingest_callback, NULL); void *data = PyCapsule_GetPointer(ingest_obj_ref_data, NULL); (*cb)(data, pyReadingSet); } else Logger::getLogger()->error("Py2C 
interface: plugin_ingest_fn: PythonReadingSet c'tor returned NULL"); } }; // end of extern "C" block ================================================ FILE: C/services/south-plugin-interfaces/python/python_plugin_interface.cpp ================================================ /* * Fledge south plugin interface related * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <logger.h> #include <config_category.h> #include <reading_set.h> #include <mutex> #include <south_plugin.h> #include <pyruntime.h> #include <Python.h> #include <python_plugin_common_interface.h> #include <pythonreadingset.h> #define SHIM_SCRIPT_NAME "south_shim" using namespace std; extern "C" { extern PLUGIN_INFORMATION *plugin_info_fn(); extern PLUGIN_HANDLE plugin_init_fn(ConfigCategory *); extern void plugin_reconfigure_fn(PLUGIN_HANDLE*, const std::string&); extern void plugin_shutdown_fn(PLUGIN_HANDLE); extern void logErrorMessage(); extern PLUGIN_INFORMATION *Py2C_PluginInfo(PyObject *); // South plugin entry points std::vector<Reading *>* plugin_poll_fn(PLUGIN_HANDLE); void plugin_start_fn(PLUGIN_HANDLE handle); void plugin_register_ingest_fn(PLUGIN_HANDLE handle,INGEST_CB2 cb,void * data); bool plugin_write_fn(PLUGIN_HANDLE handle, const std::string& name, const std::string& value); bool plugin_operation_fn(PLUGIN_HANDLE handle, string operation, int parameterCount, PLUGIN_PARAMETER *parameters[]); /** * Constructor for PythonPluginHandle */ void *PluginInterfaceInit(const char *pluginName, const char * pluginPathName) { bool initialisePython = false; // Set plugin name, also for methods in common-plugin-interfaces/python gPluginName = pluginName; string fledgePythonDir; string fledgeRootDir(getenv("FLEDGE_ROOT")); fledgePythonDir = fledgeRootDir + "/python"; string southRootPath = fledgePythonDir + string(R"(/fledge/plugins/south/)") + string(pluginName); Logger::getLogger()->info("%s:%d:, southRootPath=%s", 
__FUNCTION__, __LINE__, southRootPath.c_str()); // Embedded Python 3.5 program name wchar_t *programName = Py_DecodeLocale(pluginName, NULL); Py_SetProgramName(programName); PyMem_RawFree(programName); PythonRuntime::getPythonRuntime(); // Acquire GIL PyGILState_STATE state = PyGILState_Ensure(); Logger::getLogger()->info("SouthPlugin %s:%d: " "southRootPath=%s, fledgePythonDir=%s, plugin '%s'", __FUNCTION__, __LINE__, southRootPath.c_str(), fledgePythonDir.c_str(), pluginName); // Set Python path for embedded Python 3.x // Get current sys.path - borrowed reference PyObject* sysPath = PySys_GetObject((char *)"path"); PyList_Append(sysPath, PyUnicode_FromString((char *) southRootPath.c_str())); PyList_Append(sysPath, PyUnicode_FromString((char *) fledgePythonDir.c_str())); // Set sys.argv for embedded Python 3.5 int argc = 2; wchar_t* argv[2]; argv[0] = Py_DecodeLocale("", NULL); argv[1] = Py_DecodeLocale(pluginName, NULL); PySys_SetArgv(argc, argv); // 2) Import Python script PyObject *pModule = PyImport_ImportModule(pluginName); // Check whether the Python module has been imported if (!pModule) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("PluginInterfaceInit: cannot import Python 3.5 script " "'%s' from '%s' : plugin '%s'", pluginName, southRootPath.c_str(), pluginName); } else { std::pair<std::map<string, PythonModule*>::iterator, bool> ret; if (pythonModules) { // Add element ret = pythonModules->insert(pair<string, PythonModule*> (string(pluginName), new PythonModule(pModule, initialisePython, string(pluginName), PLUGIN_TYPE_SOUTH, // New Python interpteter not set NULL))); } // Check result if (!pythonModules || ret.second == false) { Logger::getLogger()->fatal("%s:%d: python module not added to the map " "of loaded plugins, pModule=%p, plugin '%s', aborting.", __FUNCTION__, __LINE__, pModule, pluginName); Py_CLEAR(pModule); return NULL; } else { Logger::getLogger()->debug("%s:%d: python module loaded successfully, 
pModule=%p, plugin '%s'", __FUNCTION__, __LINE__, pModule, pluginName); } } // Release GIL PyGILState_Release(state); return pModule; } /** * Function to invoke 'plugin_write' function in python plugin * * @param handle Plugin handle from plugin_init_fn * @param name Name of parameter to write * @param value Value to be written to that parameter */ bool plugin_write_fn(PLUGIN_HANDLE handle, const std::string& name, const std::string& value) { bool rv = false; if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_write(): " "handle is NULL"); return rv; } if (!pythonHandles) { Logger::getLogger()->error("pythonHandles map is NULL " "in plugin_write, plugin handle '%p'", handle); return rv; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_write(): " "pModule is NULL, plugin handle '%p'", handle); return rv; } std::mutex mtx; PyObject* pFunc; lock_guard<mutex> guard(mtx); PyGILState_STATE state = PyGILState_Ensure(); Logger::getLogger()->debug("plugin_handle: plugin_write(): " "pModule=%p, handle=%p, plugin '%s'", it->second->m_module, handle, it->second->m_name.c_str()); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_write"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find method 'plugin_write' " "in loaded python module '%s'", it->second->m_name.c_str()); PyGILState_Release(state); return rv; } if (!PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method plugin_write " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return rv; } Logger::getLogger()->debug("plugin_write with name=%s, value=%s", name.c_str(), value.c_str()); // Call Python method passing an object and 2 C-style strings PyObject* pReturn = 
PyObject_CallFunction(pFunc, "Oss",
					  handle,
					  name.c_str(),
					  value.c_str());
	Py_CLEAR(pFunc);

	// Handle return
	if (!pReturn)
	{
		Logger::getLogger()->error("Called python script method plugin_write : "
					   "error while getting result object, plugin '%s'",
					   it->second->m_name.c_str());
		logErrorMessage();
	}
	else
	{
		// Only a Python bool is accepted as result; anything else is logged and ignored
		if (PyBool_Check(pReturn))
		{
			rv = PyObject_IsTrue(pReturn);
			Logger::getLogger()->info("plugin_write() returned %s", rv?"TRUE":"FALSE");
		}
		else
		{
			Logger::getLogger()->error("plugin_handle: plugin_write(): "
						   "got result object '%p' of unexpected type %s, plugin '%s'",
						   pReturn,
						   pReturn->ob_type->tp_name,
						   it->second->m_name.c_str());
		}
		Py_CLEAR(pReturn);
	}
	PyGILState_Release(state);
	return rv;
}

/**
 * Function to invoke 'plugin_operation' function in python plugin
 *
 * @param    handle         Plugin handle from plugin_init_fn
 * @param    operation      Name of operation
 * @param    parameterCount Number of parameters in Parameter list
 * @param    parameters     Parameter list
 */
bool plugin_operation_fn(PLUGIN_HANDLE handle, string operation, int parameterCount, PLUGIN_PARAMETER *parameters[])
{
	bool rv = false;
	if (!handle)
	{
		Logger::getLogger()->fatal("plugin_handle: plugin_operation(): "
					   "handle is NULL");
		return rv;
	}

	if (!pythonHandles)
	{
		Logger::getLogger()->error("pythonHandles map is NULL "
					   "in plugin_operation, plugin handle '%p'",
					   handle);
		return rv;
	}

	// Look for Python module for handle key
	auto it = pythonHandles->find(handle);
	if (it == pythonHandles->end() ||
	    !it->second ||
	    !it->second->m_module)
	{
		Logger::getLogger()->fatal("plugin_handle: plugin_operation(): "
					   "pModule is NULL, plugin handle '%p'",
					   handle);
		return rv;
	}

	// NOTE(review): mtx is function-local, so this lock_guard serialises
	// nothing — probably meant to be a file-scope mutex; the GIL below is
	// what actually serialises access. Confirm before changing.
	std::mutex mtx;
	PyObject* pFunc;
	lock_guard<mutex> guard(mtx);
	PyGILState_STATE state = PyGILState_Ensure();

	Logger::getLogger()->debug("plugin_handle: plugin_operation(): "
				   "pModule=%p, *handle=%p, plugin '%s'",
				   it->second->m_module,
				   handle,
				   it->second->m_name.c_str());

	// Fetch required method in loaded object
	pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_operation");
	if (!pFunc)
	{
		Logger::getLogger()->fatal("Cannot find method 'plugin_operation' "
					   "in loaded python module '%s'",
					   it->second->m_name.c_str());
		PyGILState_Release(state);
		return rv;
	}

	if (!PyCallable_Check(pFunc))
	{
		// Failure
		if (PyErr_Occurred())
		{
			logErrorMessage();
		}
		Logger::getLogger()->fatal("Cannot call method plugin_operation "
					   "in loaded python module '%s'",
					   it->second->m_name.c_str());
		Py_CLEAR(pFunc);
		PyGILState_Release(state);
		return rv;
	}

	Logger::getLogger()->debug("plugin_operation with operation=%s, parameterCount=%d",
				   operation.c_str(), parameterCount);

	// Build a Python list of (name, value) tuples from the parameter array.
	// PyList_SetItem steals the reference returned by Py_BuildValue.
	PyObject *paramsList = PyList_New(parameterCount);
	for (int i=0; i<parameterCount; i++)
	{
		PyList_SetItem(paramsList, i, Py_BuildValue("(ss)", parameters[i]->name.c_str(), parameters[i]->value.c_str()) );
	}

	// Call Python method passing an object and 2 C-style strings
	PyObject* pReturn = PyObject_CallFunction(pFunc, "OsO",
					  handle,
					  operation.c_str(),
					  paramsList);
	Py_CLEAR(pFunc);
	Py_CLEAR(paramsList);

	// Handle return
	if (!pReturn)
	{
		Logger::getLogger()->error("Called python script method plugin_operation : "
					   "error while getting result object, plugin '%s'",
					   it->second->m_name.c_str());
		logErrorMessage();
	}
	else
	{
		// Only a Python bool result is accepted
		if (PyBool_Check(pReturn))
		{
			rv = PyObject_IsTrue(pReturn);
			Logger::getLogger()->info("plugin_operation() returned %s", rv?"TRUE":"FALSE");
		}
		else
		{
			Logger::getLogger()->error("plugin_handle: plugin_operation(): "
						   "got result object '%p' of unexpected type %s, plugin '%s'",
						   pReturn,
						   pReturn->ob_type->tp_name,
						   it->second->m_name.c_str());
		}
		Py_CLEAR(pReturn);
	}
	PyGILState_Release(state);
	return rv;
}

/**
 * Returns function pointer that can be invoked to call '_sym' function
 * in python plugin
 */
void* PluginInterfaceResolveSymbol(const char *_sym, const string& name)
{
	string sym(_sym);
	if (!sym.compare("plugin_info"))
		return (void *) plugin_info_fn;
	else if (!sym.compare("plugin_init"))
		return (void *) plugin_init_fn;
	else if (!sym.compare("plugin_poll"))
		return (void *) 
plugin_poll_fn; else if (!sym.compare("plugin_shutdown")) return (void *) plugin_shutdown_fn; else if (!sym.compare("plugin_reconfigure")) return (void *) plugin_reconfigure_fn; else if (!sym.compare("plugin_start")) return (void *) plugin_start_fn; else if (!sym.compare("plugin_register_ingest")) return (void *) plugin_register_ingest_fn; else if (!sym.compare("plugin_write")) return (void *) plugin_write_fn; else if (!sym.compare("plugin_operation")) return (void *) plugin_operation_fn; else { Logger::getLogger()->fatal("PluginInterfaceResolveSymbol can not find symbol '%s' " "in the South Python plugin interface library, loaded plugin '%s'", _sym, name.c_str()); return NULL; } } /** * Function to invoke 'plugin_poll' function in python plugin * * @param handle Plugin handle from plugin_init_fn * @return Vector of Reading data */ std::vector<Reading *>* plugin_poll_fn(PLUGIN_HANDLE handle) { if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_poll_fn: " "handle is NULL"); return NULL; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_poll_fn, handle '%p'", handle); return NULL; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_poll(): " "pModule is NULL, plugin handle '%p'", handle); return NULL; } std::mutex mtx; PyObject* pFunc; lock_guard<mutex> guard(mtx); PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_poll"); if (!pFunc) { Logger::getLogger()->fatal("Cannot find 'plugin_poll' method " "in loaded python module '%s'", it->second->m_name.c_str()); PyGILState_Release(state); return NULL; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->fatal("Cannot call method 'plugin_poll' " "in 
loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return NULL; } // Call Python method passing an object PyObject* pReturn = PyObject_CallFunction(pFunc, "O", handle); Py_CLEAR(pFunc); // Handle returned data if (!pReturn) { // Errors while getting result object Logger::getLogger()->error("Called python script method 'plugin_poll' : " "error while getting result object, plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); PyGILState_Release(state); return NULL; } else { // Get reading data PythonReadingSet *pyReadingSet = NULL; // Valid ReadingSet would be in the form of python dict or list if (PyList_Check(pReturn) || PyDict_Check(pReturn)) { try { pyReadingSet = new PythonReadingSet(pReturn); } catch (std::exception e) { Logger::getLogger()->warn("Failed to create a Python ReadingSet from the data returned by the south plugin poll routine, %s", e.what()); pyReadingSet = NULL; } } // Remove pReturn object Py_CLEAR(pReturn); PyGILState_Release(state); if (pyReadingSet) { std::vector<Reading *> *vec2 = pyReadingSet->moveAllReadings(); delete pyReadingSet; return vec2; } else { return NULL; } } } /** * Function to invoke 'plugin_start' function in python plugin * * @param handle Plugin handle from plugin_init_fn */ void plugin_start_fn(PLUGIN_HANDLE handle) { if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_start_fn: " "handle is NULL"); return; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_start_fn, handle '%p'", handle); return; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_start(): " "pModule is NULL, plugin handle '%p'", handle); return; } PyObject* pFunc; PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = 
PyObject_GetAttrString(it->second->m_module, "plugin_start"); if (!pFunc) { Logger::getLogger()->warn("Cannot find 'plugin_start' method " "in loaded python module '%s'", it->second->m_name.c_str()); PyGILState_Release(state); return; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->warn("Cannot call method 'plugin_start' " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return; } // Call Python method passing an object PyObject* pReturn = PyObject_CallFunction(pFunc, "O", handle); Py_CLEAR(pFunc); // Handle return if (!pReturn) { Logger::getLogger()->error("Called python script method plugin_start : " "error while getting result object, plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); } PyGILState_Release(state); } /** * Function to invoke 'plugin_register_ingest' function in python plugin * * @param handle Plugin handle from plugin_init_fn * @param cb Ingest routine to call * @param data Data to pass to Ingest routine */ void plugin_register_ingest_fn(PLUGIN_HANDLE handle, INGEST_CB2 cb, void *data) { if (!handle) { Logger::getLogger()->fatal("plugin_handle: plugin_register_ingest_fn: " "handle is NULL"); return; } if (!pythonHandles) { Logger::getLogger()->error("pythonModules map is NULL " "in plugin_register_ingest_fn, handle '%p'", handle); return; } // Look for Python module for handle key auto it = pythonHandles->find(handle); if (it == pythonHandles->end() || !it->second || !it->second->m_module) { Logger::getLogger()->fatal("plugin_handle: plugin_register_ingest(): " "pModule is NULL, plugin handle '%p'", handle); return; } PyObject* pFunc; PyGILState_STATE state = PyGILState_Ensure(); // Fetch required method in loaded object pFunc = PyObject_GetAttrString(it->second->m_module, "plugin_register_ingest"); if (!pFunc) { Logger::getLogger()->warn("Cannot find 'plugin_register_ingest' " "method in loaded python module 
'%s'", it->second->m_name.c_str()); PyGILState_Release(state); return; } if (!pFunc || !PyCallable_Check(pFunc)) { // Failure if (PyErr_Occurred()) { logErrorMessage(); } Logger::getLogger()->warn("Cannot call method plugin_register_ingest " "in loaded python module '%s'", it->second->m_name.c_str()); Py_CLEAR(pFunc); PyGILState_Release(state); return; } // Call Python method passing an object PyObject* ingest_fn = PyCapsule_New((void *)cb, NULL, NULL); PyObject* ingest_ref = PyCapsule_New((void *)data, NULL, NULL); PyObject* pReturn = PyObject_CallFunction(pFunc, "OOO", handle, ingest_fn, ingest_ref); Py_CLEAR(pFunc); Py_CLEAR(ingest_fn); // Handle returned data if (!pReturn) { Logger::getLogger()->error("Called python script method plugin_register_ingest " ": error while getting result object, plugin '%s'", it->second->m_name.c_str()); logErrorMessage(); } else { Logger::getLogger()->info("plugin_handle: plugin_register_ingest(): " "got result object '%p', plugin '%s'", pReturn, it->second->m_name.c_str()); } PyGILState_Release(state); } }; ================================================ FILE: C/services/storage/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (Storage) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(CMAKE_CXX_FLAGS_PROFILING "-O2 -pg") set(DLLIB -ldl) set(UUIDLIB -luuid) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) set(EXEC fledge.services.storage) include_directories(. 
include ../../thirdparty/Simple-Web-Server ../../thirdparty/rapidjson/include ../common/include ../../common/include) find_package(Threads REQUIRED) set(BOOST_COMPONENTS system thread) # Late 2017 TODO: remove the following checks and always use std::regex if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_BOOST_REGEX") endif() endif() find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) if(APPLE) set(OPENSSL_ROOT_DIR "/usr/local/opt/openssl") endif() file(GLOB storage_src "*.cpp") link_directories(${PROJECT_BINARY_DIR}/../../lib) add_executable(${EXEC} ${storage_src} ${service_common_src} ${common_src}) target_link_libraries(${EXEC} ${Boost_LIBRARIES}) target_link_libraries(${EXEC} ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(${EXEC} ${DLLIB}) target_link_libraries(${EXEC} ${UUIDLIB}) target_link_libraries(${EXEC} ${COMMON_LIB}) target_link_libraries(${EXEC} ${SERVICE_COMMON_LIB}) install(TARGETS ${EXEC} RUNTIME DESTINATION fledge/services) if(MSYS) #TODO: Is MSYS true when MSVC is true? target_link_libraries(${EXEC} ws2_32 wsock32) if(OPENSSL_FOUND) target_link_libraries(${EXEC} ws2_32 wsock32) endif() endif() # Set profiling flags if 'Profiling' build if(CMAKE_BUILD_TYPE STREQUAL "Profiling") message("Building in Profiling mode") set_target_properties(${EXEC} PROPERTIES COMPILE_FLAGS "${CMAKE_CXX_FLAGS_PROFILING}") # define 'PROFILING' flag used by service to change directory target_compile_definitions(${EXEC} PRIVATE PROFILING=1) set(CMAKE_SHARED_LINKED_FLAGS "${CMAKE_SHARED_LINKED_FLAGS} -O2 -pg") target_link_libraries(${EXEC} -O2 -pg) endif() ================================================ FILE: C/services/storage/README.rst ================================================ .. 
|br| raw:: html

   <br />

***********************
Fledge Storage Service
***********************

This is the Storage service of the Fledge platform; it provides a storage layer
with a REST interface and a pluggable mechanism to attach to data storage
systems, e.g. databases or document stores.

|br| |br|

Building
========

The Storage service is built using cmake. To build the Storage service:
::
  mkdir build
  cd build
  cmake ..
  make

This will create the executable file for the Storage service.

Use the command ``make install`` to install in the default location; note you
will need permission on the installation directory or use the sudo command.
Pass the option *DESTDIR=* to set your own destination into which to install
the Storage service.

Build the plugins by going to the directory *C/plugins/storage* and following
the instructions in each of the plugin directories.

|br| |br|

Prerequisites
=============

To build the Storage service the machine must have installed the *cmake*
system, *make* and *g++*, plus the libraries for the Storage plugin, e.g.
Postgres and the boost libraries.

To run the Storage service the system requires a number of libraries to be
installed: the boost system library and the Postgres libpq libraries.

On Ubuntu based Linux distributions these can be installed with *apt-get*:
::
  apt-get install libboost-dev libboost-system-dev libboost-thread-dev libpq-dev
  apt-get install cmake g++ make

|br| |br|

Running
=======

The Storage service may be run in daemon mode or interactively by use of the
*-d* command line argument.

The Storage service will register with the core to allow other services and
the core to find the API of the Storage service. It assumes the core is
located on the same machine. This can however be overridden by the use of the
command line arguments *--port=* and *--address=* to set the port and address
of the core microservice.

The Storage layer will look for Storage plugins in the current directory or in
the directory *$FLEDGE_ROOT/plugins/storage*.
|br| |br|

Ports
=====

The Storage system listens for REST requests on two separate ports, the
service port for storage based requests and the management port for management
requests. These may either be set to specific ports in the configuration file
or dynamic ports can be allocated at runtime. In this latter mode of operation
the clients of the Storage layer must determine these ports by connecting to
the core and requesting the Storage layer registration information.

To run the Storage service with fixed ports modify the configuration cache
file, *storage.json* in *$FLEDGE_DATA/etc*, to pass explicit ports rather
than 0. Note that if not set, *$FLEDGE_DATA* has the same value as
*$FLEDGE_ROOT*.

storage.json file
-----------------

This is an example of a *storage.json* file:
::
  {
    "plugin"         : { "value" : "postgres" },
    "threads"        : { "value" : "1" },
    "port"           : { "value" : "8082" },
    "managementPort" : { "value" : "1082" }
  }

|br| |br|

Testing
=======

A test suite is available in the development directory
*tests/unit_tests/services/storage*.

================================================
FILE: C/services/storage/configuration.cpp
================================================
/*
 * Fledge storage service.
* * Copyright (c) 2017-2018 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <configuration.h> #include <rapidjson/document.h> #include <rapidjson/istreamwrapper.h> #include <rapidjson/ostreamwrapper.h> #include <rapidjson/error/en.h> #include <rapidjson/writer.h> #include <fstream> #include <iostream> #include <unordered_set> #include <unistd.h> #include <plugin_api.h> #include <plugin_manager.h> static std::string defaultConfiguration(QUOTE({ "plugin" : { "value" : "sqlite", "default" : "sqlite", "description" : "The main storage plugin to load", "type" : "enumeration", "options" : [ "sqlite", "sqlitelb", "postgres" ], "displayName" : "Storage Plugin", "order" : "1" }, "readingPlugin" : { "value" : "Use main plugin", "default" : "Use main plugin", "description" : "The storage plugin to load for readings data.", "type" : "enumeration", "options" : [ "Use main plugin", "sqlite", "sqlitelb", "sqlitememory", "postgres" ], "displayName" : "Readings Plugin", "order" : "2" }, "threads" : { "value" : "1", "default" : "1", "description" : "The number of threads to use for the storage API", "type" : "integer", "displayName" : "Storage API threads", "minimum" : "1", "maximum" : "10", "order" : "3" }, "workerPool" : { "value" : "5", "default" : "5", "description" : "The number of threads to create in the thread pool used to execute operations against reading data", "type" : "integer", "displayName" : "Worker thread pool", "minimum" : "1", "maximum" : "10", "order" : "4" }, "managedStatus" : { "value" : "false", "default" : "false", "description" : "Control if Fledge should manage the storage provider", "type" : "boolean", "displayName" : "Manage Storage", "order" : "5" }, "port" : { "value" : "0", "default" : "0", "description" : "The port to listen on", "type" : "integer", "displayName" : "Service Port", "order" : "6" }, "managementPort" : { "value" : "0", "default" : "0", "description" : "The management port to 
listen on.", "type" : "integer", "displayName" : "Management Port", "order" : "7" }, "logLevel" : { "value" : "warning", "default" : "warning", "description" : "Minimum level of messages to log", "type" : "enumeration", "displayName" : "Log Level", "options" : [ "error", "warning", "info", "debug" ], "order" : "8" }, "timeout" : { "value" : "60", "default" : "60", "description" : "Server request timeout, expressed in seconds", "type" : "integer", "displayName" : "Timeout", "order" : "9", "minimum" : "5", "maximum" : "3600" }, "perfmon": { "description": "Track and store performance counters", "type": "boolean", "displayName": "Performance Counters", "default": "false", "value": "false", "order" : "10" } })); using namespace std; using namespace rapidjson; /** * Constructor for storage service configuration class. */ StorageConfiguration::StorageConfiguration() { logger = Logger::getLogger(); document = new Document(); /** * Update options in deafult configuration for items 'plugin' and * 'readingPlugin' with installed plugins */ updateStoragePluginConfig(); readCache(); checkCache(); if (hasValue("logLevel")) { logger->setMinLevel(getValue("logLevel")); } } /** * Storage configuration destructor */ StorageConfiguration::~StorageConfiguration() { delete document; } /** * Return if a value exsits for the cached configuration category */ bool StorageConfiguration::hasValue(const string& key) { if (document->HasParseError()) { logger->error("Default configuration failed to parse. %s at %d", GetParseError_En(document->GetParseError()), document->GetErrorOffset()); return false; } if (!document->HasMember(key.c_str())) return false; return true; } /** * Return a value from the cached configuration category */ const char *StorageConfiguration::getValue(const string& key) { if (document->HasParseError()) { logger->error("Default configuration failed to parse. 
%s at %d", GetParseError_En(document->GetParseError()), document->GetErrorOffset()); return 0; } if (!document->HasMember(key.c_str())) return 0; Value& item = (*document)[key.c_str()]; return item["value"].GetString(); } /** * Set the value of a configuration item */ bool StorageConfiguration::setValue(const string& key, const string& value) { try { Value& item = (*document)[key.c_str()]; const char *cstr = value.c_str(); item["value"].SetString(cstr, strlen(cstr), document->GetAllocator()); return true; } catch (...) { return false; } } /** * Called when the configuration category is updated. */ void StorageConfiguration::updateCategory(const string& json) { logger->info("New storage configuration %s", json.c_str()); Document *newdoc = new Document(); newdoc->Parse(json.c_str()); if (newdoc->HasParseError()) { logger->error("New configuration failed to parse. %s at %d", GetParseError_En(newdoc->GetParseError()), newdoc->GetErrorOffset()); delete newdoc; } else { delete document; document = newdoc; writeCache(); } } /** * Read the cache JSON for te configuration category from the cache file * into memory. */ void StorageConfiguration::readCache() { string cachefile; getConfigCache(cachefile); if (access(cachefile.c_str(), F_OK ) != 0) { logger->info("Storage cache %s unreadable, using default configuration: %s.", cachefile.c_str(), defaultConfiguration.c_str()); document->Parse(defaultConfiguration.c_str()); if (document->HasParseError()) { logger->error("Default configuration failed to parse. %s at %d", GetParseError_En(document->GetParseError()), document->GetErrorOffset()); } writeCache(); return; } try { ifstream ifs(cachefile); IStreamWrapper isw(ifs); document->ParseStream(isw); if (document->HasParseError()) { logger->error("Default configuration failed to parse. 
%s at %d", GetParseError_En(document->GetParseError()), document->GetErrorOffset()); } } catch (exception& ex) { logger->error("Configuration cache failed to read %s.", ex.what()); } } /** * Write the configuration cache to disk */ void StorageConfiguration::writeCache() { string cachefile; getConfigCache(cachefile); ofstream ofs(cachefile); OStreamWrapper osw(ofs); Writer<OStreamWrapper> writer(osw); document->Accept(writer); } /** * Retrieve the location of the configuration cache to use * * If a configuration cache exists in the current directory then it is used * * If not and the environment variable FLEDGE_DATA exists then the * configuration file under etc in that directory will be used. * * If that does not exist and the configuration variable FLEDGE_HONE * exists then a configuration file under etc in that dirstory is used */ void StorageConfiguration::getConfigCache(string& cache) { char buf[512], *basedir; if (access(CONFIGURATION_CACHE_FILE, F_OK) == 0) { cache = CONFIGURATION_CACHE_FILE; return; } if ((basedir = getenv("FLEDGE_DATA")) != NULL) { snprintf(buf, sizeof(buf), "%s/etc/%s", basedir, CONFIGURATION_CACHE_FILE); if (access(buf, F_OK) == 0) { cache = buf; return; } } else if ((basedir = getenv("FLEDGE_ROOT")) != NULL) { snprintf(buf, sizeof(buf), "%s/data/etc/%s", basedir, CONFIGURATION_CACHE_FILE); if (access(buf, F_OK) == 0) { cache = buf; return; } } else { snprintf(buf, sizeof(buf), "%s", CONFIGURATION_CACHE_FILE); } // No configuration cache has been found - return the default location cache = buf; } /** * Return the default category to register with the core. 
This allows * the storage configuration to appear in the UI * * @return DefaultConfigCategory* The default configuration category */ DefaultConfigCategory *StorageConfiguration::getDefaultCategory() { StringBuffer buffer; Writer<StringBuffer> writer(buffer); document->Accept(writer); const char *config = buffer.GetString(); return new DefaultConfigCategory(STORAGE_CATEGORY, config); } /** * One off check for upgrade to cache that has full UI information * * This is only really triggered when we first do an upgrade from the * older cache files to the current JSON defaults that contains the * full information needed for the GUI. * * FOGL-4151 After changing to a new plugin, say from sqlite to postgres, the first * time we run in the new database there is no configuraion category. In this case we will * get the default category, which will have a default of sqlite and no value. This will * end up reporting the wrong information in the UI when we look at the category, therefore * we special case the plugin name and set the default to whatever the current value is * for just this property. * * FOGL-7074 Make the plugin selection an enumeration */ void StorageConfiguration::checkCache() { bool forceUpdate = false; bool writeCacheRequired = false; /* * If the cached version of the configuFration that has been read in * does not contain an item in the default configuration, then copy * that item from the default configuration. * * This allows new tiems to be added to the configuration and populated * in the cache on first restart. */ Document *newdoc = new Document(); newdoc->Parse(defaultConfiguration.c_str()); if (newdoc->HasParseError()) { logger->error("Default configuration failed to parse. 
%s at %d", GetParseError_En(document->GetParseError()), newdoc->GetErrorOffset()); } else { for (Value::ConstMemberIterator itr = newdoc->MemberBegin(); itr != newdoc->MemberEnd(); ++itr) { const char *name = itr->name.GetString(); Value &newval = (*newdoc)[name]; if (!hasValue(name)) { logger->warn("Adding storage configuration item %s from defaults", name); Document::AllocatorType& a = document->GetAllocator(); Value copy(name, a); copy.CopyFrom(newval, a); Value n(name, a); document->AddMember(n, copy, a); writeCacheRequired = true; } } // if storage plugins are updated after cache is created, update exisitng cache // with new/removed plugins if (document->HasMember("plugin") && newdoc->HasMember("plugin")) { Value& currentItem = (*newdoc)["plugin"]; Value& cacheItem = (*document)["plugin"]; // check for difference between cached plugin options and // currently installed storage plugins unordered_set<std::string>cacheOptions; unordered_set<std::string>currentOptions; // build list of plugins for (auto& options : currentItem["options"].GetArray()) { currentOptions.insert(options.GetString()); } if (cacheItem.HasMember("options") && cacheItem["options"].IsArray()) { for (auto& options : cacheItem["options"].GetArray()) { if (options.IsString()) { cacheOptions.insert(options.GetString()); } } } // check for difference between cached and current plugins bool updateOptions = false; if (cacheOptions.size() != currentOptions.size()) { updateOptions = true; } else { for (const std::string& element : currentOptions) { if (cacheOptions.find(element) == cacheOptions.end()) { updateOptions = true; break; } } } if (updateOptions) { // Update cached plugins option Document::AllocatorType& a = document->GetAllocator(); cacheItem["options"].SetArray(); for (auto& option : currentOptions) { cacheItem["options"].PushBack(Value().SetString(option.c_str(),a), a); } writeCacheRequired = true; } } if (document->HasMember("readingPlugin") && newdoc->HasMember("readingPlugin")) { 
Value& currentItem = (*newdoc)["readingPlugin"]; Value& cacheItem = (*document)["readingPlugin"]; // check for difference between cached plugin options and // currently installed storage plugins unordered_set<std::string>cacheOptions; unordered_set<std::string>currentOptions; // build list of plugins for (auto& options : currentItem["options"].GetArray()) { currentOptions.insert(options.GetString()); } if (cacheItem.HasMember("options") && cacheItem["options"].IsArray()) { for (auto& options : cacheItem["options"].GetArray()) { if (options.IsString()) { cacheOptions.insert(options.GetString()); } } } // check for difference between cached and current plugins bool updateOptions = false; if (cacheOptions.size() != currentOptions.size()) { updateOptions = true; } else { for (const std::string& element : currentOptions) { if (cacheOptions.find(element) == cacheOptions.end()) { updateOptions = true; break; } } } if (updateOptions) { // Update cached plugins option Document::AllocatorType& a = document->GetAllocator(); cacheItem["options"].SetArray(); for (auto& option : currentOptions) { cacheItem["options"].PushBack(Value().SetString(option.c_str(),a), a); } writeCacheRequired = true; } } } delete newdoc; if (writeCacheRequired) { // We added a new member writeCache(); } // Upgrade step to add eumeration for plugin if (document->HasMember("plugin")) { Value& item = (*document)["plugin"]; if (item.HasMember("type") && item["type"].IsString()) { const char *type = item["type"].GetString(); if (strcmp(type, "enumeration")) { // It's not an enumeration currently forceUpdate = true; } } } // Cache is from before we used an enumeration for the plugin, force upgrade // steps if (forceUpdate == false && document->HasMember("plugin")) { logger->info("Adding database plugin enumerations"); Value& item = (*document)["plugin"]; if (item.HasMember("type")) { const char *val = getValue("plugin"); item["default"].SetString(val, strlen(val)); Value& rp = (*document)["readingPlugin"]; 
const char *rval = getValue("readingPlugin"); if (strlen(rval) == 0) { rval = "Use main plugin"; } char *ncrval = strdup(rval); rp["default"].SetString(ncrval, strlen(rval)); rp["value"].SetString(ncrval, strlen(rval)); logger->info("Storage configuration cache is up to date"); return; } } logger->info("Storage configuration cache is not up to date"); newdoc = new Document(); newdoc->Parse(defaultConfiguration.c_str()); if (newdoc->HasParseError()) { logger->error("Default configuration failed to parse. %s at %d", GetParseError_En(document->GetParseError()), newdoc->GetErrorOffset()); } else { for (Value::ConstMemberIterator itr = newdoc->MemberBegin(); itr != newdoc->MemberEnd(); ++itr) { const char *name = itr->name.GetString(); Value &newval = (*newdoc)[name]; if (hasValue(name)) { const char *val = getValue(name); newval["value"].SetString(strdup(val), strlen(val)); if (strcmp(name, "plugin") == 0) { newval["default"].SetString(strdup(val), strlen(val)); logger->warn("Set default of %s to %s", name, val); } if (strcmp(name, "readingPlugin") == 0) { if (strlen(val) == 0) { val = "Use main plugin"; } newval["default"].SetString(strdup(val), strlen(val)); logger->warn("Set default of %s to %s", name, val); } } } } delete document; document = newdoc; writeCache(); } /** * Check for installed storage and readings plugin and update default configuration. * * Update options for category item 'plugin' and 'readingPlugin' * with installed plugins. * * If no plugin is found default config is not updated. * * For plugins installed after cache is created options is updated via checkCache on restart */ void StorageConfiguration::updateStoragePluginConfig() { PluginManager *manager = PluginManager::getInstance(); manager->setPluginType(PLUGIN_TYPE_ID_STORAGE); // Fetch installed storage and readings plugins. 
auto storagePlugins = manager->getPluginsByFlags(PLUGIN_TYPE_STORAGE, SP_COMMON); auto readingsPlugins = manager->getPluginsByFlags(PLUGIN_TYPE_STORAGE, SP_READINGS); Document newDocument; newDocument.Parse(defaultConfiguration.c_str()); if (storagePlugins.size() > 0) { // Modify the "options" array for storage with installed plugins if (newDocument.HasMember("plugin") && newDocument["plugin"].IsObject()) { Value& plugin = newDocument["plugin"]; if (plugin.HasMember("options") && plugin["options"].IsArray()) { Value& options = plugin["options"]; options.Clear(); for (const auto& option : storagePlugins) { options.PushBack(Value().SetString(option.c_str(), newDocument.GetAllocator()), newDocument.GetAllocator()); } } } } else { logger->debug("unable to find installed storage plugins"); } if (readingsPlugins.size() > 0) { // Modify the "options" array for readingsPlugin with installed plugins if (newDocument.HasMember("readingPlugin") && newDocument["readingPlugin"].IsObject()) { Value& plugin = newDocument["readingPlugin"]; if (plugin.HasMember("options") && plugin["options"].IsArray()) { Value& options = plugin["options"]; options.Clear(); // Add default option "Use main plugin" options.PushBack(Value().SetString("Use main plugin", newDocument.GetAllocator()), newDocument.GetAllocator()); for (const auto& option : readingsPlugins) { options.PushBack(Value().SetString(option.c_str(), newDocument.GetAllocator()), newDocument.GetAllocator()); } } } } else { logger->debug("unable to find installed readings plugins"); } // Update default configuration if options are modified if (storagePlugins.size() > 0 || readingsPlugins.size() > 0) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); newDocument.Accept(writer); defaultConfiguration = buffer.GetString(); } } ================================================ FILE: C/services/storage/include/configuration.h ================================================ #ifndef _CONFIGURATION_H #define _CONFIGURATION_H /* * 
 Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <logger.h>
#include <string>
#include <rapidjson/document.h>
#include <config_category.h>

#define STORAGE_CATEGORY	"Storage"
#define CATEGORY_DESCRIPTION	"Storage configuration"
#define ADVANCED		"Advanced"
#define CONFIGURATION_CACHE_FILE "storage.json"

/**
 * The storage service must handle its own configuration differently
 * to other services as it is unable to read the configuration from
 * the database. The configuration is required in order to connect
 * to the database. Therefore it keeps a shadow copy in a local file
 * and it keeps this local, cached copy up to date by registering
 * interest in the category and whenever a change is made writing
 * the category to the local cache file.
 */
class StorageConfiguration {
	public:
		StorageConfiguration();
		~StorageConfiguration();
		// Return the value of the named configuration item, or NULL
		const char		*getValue(const std::string& key);
		// Return true if the named configuration item exists
		bool			hasValue(const std::string& key);
		// Set the value of the named configuration item
		bool			setValue(const std::string& key, const std::string& value);
		// Called when the configuration category is updated in the core
		void			updateCategory(const std::string& json);
		// Build the default category to register with the core
		DefaultConfigCategory	*getDefaultCategory();
	private:
		// Determine the pathname of the configuration cache file
		void			getConfigCache(std::string& cache);
		rapidjson::Document	*document;	// The cached configuration category
		void			readCache();
		void			writeCache();
		void			checkCache();
		void			updateStoragePluginConfig();
		Logger			*logger;
};
#endif


================================================
FILE: C/services/storage/include/plugin_configuration.h
================================================
#ifndef _PLUGIN_CONFIGURATION_H
#define _PLUGIN_CONFIGURATION_H
/*
 * Fledge storage service.
 *
 * Copyright (c) 2020 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <logger.h>
#include <string>
#include <rapidjson/document.h>
#include <config_category.h>
#include <management_client.h>

class StoragePlugin;

/**
 * The storage service must handle its own configuration differently
 * to other services as it is unable to read the configuration from
 * the database.
 * This class deals with the configuration from the storage plugins,
 * maintaining a cache for the plugin
 */
class StoragePluginConfiguration {
	public:
		StoragePluginConfiguration(const std::string& name, StoragePlugin *plugin);
		// Return the value of the named configuration item, or NULL
		const char		*getValue(const std::string& key);
		// Return true if the named configuration item exists
		bool			hasValue(const std::string& key);
		// Set the value of the named configuration item
		bool			setValue(const std::string& key, const std::string& value);
		// Called when the plugin configuration category is updated
		void			updateCategory(const std::string& json);
		// Register the plugin configuration category with the core
		void			registerCategory(ManagementClient *client);
		DefaultConfigCategory	*getDefaultCategory();
		ConfigCategory		*getConfiguration();
	private:
		// Determine the pathname of the plugin configuration cache file
		void			getConfigCache(std::string& cache);
		void			readCache();
		void			writeCache();
		void			updateCache();
		const std::string	m_name;		// Name passed at construction
		const StoragePlugin	*m_plugin;	// The plugin this configuration belongs to
		std::string		m_category;
		std::string		m_defaultConfiguration;
		rapidjson::Document	*m_document;	// The cached plugin configuration
		Logger			*m_logger;
};
#endif


================================================
FILE: C/services/storage/include/storage_api.h
================================================
#ifndef _STORAGE_API_H
#define _STORAGE_API_H
/*
 * Fledge storage service.
* * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch, Massimiliano Pinto */ #include <server_http.hpp> #include <storage_plugin.h> #include <storage_stats.h> #include <storage_registry.h> #include <stream_handler.h> #include <perfmonitors.h> using namespace std; using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; /* * The URL for each entry point */ #define COMMON_ACCESS "^/storage/table/([A-Za-z][a-zA-Z0-9_]*)$" #define COMMON_QUERY "^/storage/table/([A-Za-z][a-zA-Z_0-9]*)/query$" #define READING_ACCESS "^/storage/reading$" #define READING_QUERY "^/storage/reading/query" #define READING_PURGE "^/storage/reading/purge" #define READING_INTEREST "^/storage/reading/interest/([A-Za-z0-9\\*][a-zA-Z0-9_%\\.\\-]*)$" #define TABLE_INTEREST "^/storage/table/interest/([A-Za-z\\*][a-zA-Z0-9_%\\.\\-]*)$" #define GET_TABLE_SNAPSHOTS "^/storage/table/([A-Za-z][a-zA-Z_0-9_]*)/snapshot$" #define CREATE_TABLE_SNAPSHOT GET_TABLE_SNAPSHOTS #define LOAD_TABLE_SNAPSHOT "^/storage/table/([A-Za-z][a-zA-Z_0-9_]*)/snapshot/([a-zA-Z_0-9_]*)$" #define DELETE_TABLE_SNAPSHOT LOAD_TABLE_SNAPSHOT #define CREATE_STORAGE_STREAM "^/storage/reading/stream$" #define STORAGE_SCHEMA "^/storage/schema" #define STORAGE_TABLE_ACCESS "^/storage/schema/([A-Za-z][a-zA-Z0-9_]*)/table/([A-Za-z][a-zA-Z0-9_]*)$" #define STORAGE_TABLE_QUERY "^/storage/schema/([A-Za-z][a-zA-Z0-9_]*)/table/([A-Za-z][a-zA-Z_0-9]*)/query$" #define PURGE_FLAG_RETAIN "retain" #define PURGE_FLAG_RETAIN_ANY "retainany" #define PURGE_FLAG_RETAIN_ALL "retainall" #define PURGE_FLAG_PURGE "purge" #define TABLE_NAME_COMPONENT 1 #define STORAGE_SCHEMA_NAME_COMPONENT 1 #define STORAGE_TABLE_NAME_COMPONENT 2 #define ASSET_NAME_COMPONENT 1 #define SNAPSHOT_ID_COMPONENT 2 /** * Class used to queue the operations to be executed by * the worker thread pool */ class StorageOperation { public: enum Operations { ReadingAppend, ReadingPurge, ReadingFetch, ReadingQuery }; public: 
	// Construct a queued storage operation, capturing the HTTP request and
	// response pair so a pool worker thread can service the call later.
	StorageOperation(StorageOperation::Operations operation, shared_ptr<HttpServer::Request> request, shared_ptr<HttpServer::Response> response) :
			m_operation(operation), m_request(request), m_response(response)
	{
	};
	~StorageOperation() { };
public:
	StorageOperation::Operations		m_operation;	// The API operation to perform
	shared_ptr<HttpServer::Request>		m_request;	// The incoming HTTP request
	shared_ptr<HttpServer::Response>	m_response;	// The response object to populate
};

class StoragePerformanceMonitor;

/**
 * The Storage API class - this class is responsible for the registration of all API
 * entry points in the storage API and the dispatch of those API calls to the internals
 * of the storage service and the storage plugin itself.
 */
class StorageApi {

public:
	StorageApi(const unsigned short port, const unsigned int threads, const unsigned int workerPoolSize);
	~StorageApi();
	static StorageApi	*getInstance();
	void			initResources();
	void			setPlugin(StoragePlugin *);
	void			setReadingPlugin(StoragePlugin *);
	void			start();
	void			startServer();
	void			wait();
	void			stopServer();
	unsigned short		getListenerPort();
	// Handlers for the generic (common) table entry points
	void			commonInsert(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			commonSimpleQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			commonQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			commonUpdate(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			commonDelete(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			defaultResource(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	// Handlers for the readings entry points
	void			readingAppend(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			readingFetch(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			readingQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			readingPurge(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	// Handlers for interest registration entry points
	void			readingRegister(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			readingUnregister(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			tableRegister(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			tableUnregister(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	// Handlers for table snapshot entry points
	void			createTableSnapshot(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			loadTableSnapshot(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			deleteTableSnapshot(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			getTableSnapshots(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			createStorageStream(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	bool			readingStream(ReadingStream **readings, bool commit);
	// Handlers for storage schema (extension table) entry points
	void			createStorageSchema(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			storageTableInsert(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			storageTableUpdate(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			storageTableDelete(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			storageTableSimpleQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			storageTableQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request);
	void			printList();
	bool			createSchema(const std::string& schema);
	// Set the HTTP request timeout on the underlying server, if it exists
	void			setTimeout(long timeout)
				{
					if (m_server)
					{
						m_server->config.timeout_request = timeout;
					}
				};
	StoragePlugin		*getStoragePlugin() { return plugin; };
	StoragePerformanceMonitor
				*getPerformanceMonitor() { return m_perfMonitor; };
	// Worker pool: worker() is the thread body, queue() enqueues an operation
	void			worker();
	void			queue(StorageOperation::Operations op, shared_ptr<HttpServer::Request> request, shared_ptr<HttpServer::Response> response);
public:
	std::atomic<int>	m_workers_count;	// Number of currently busy workers
private:
	static StorageApi	*m_instance;	// Singleton instance returned by getInstance()
	HttpServer		*m_server;	// Underlying Simple-Web-Server instance
	unsigned short		m_port;
	unsigned int		m_threads;
	thread			*m_thread;
	StoragePlugin		*plugin;	// Plugin used for common table operations
	StoragePlugin		*readingPlugin;	// Plugin used for readings (may be the same as plugin)
	StorageStats		stats;
	// LRU-bounded map of sequence numbers, protected by mtx_seqnum_map
	std::map<string, pair<int,std::list<std::string>::iterator>>
				m_seqnum_map;
	const unsigned int	max_entries_in_seqnum_map = 16;
	std::list<std::string>	seqnum_map_lru_list;	// has the most recently accessed elements of m_seqnum_map at front of the dequeue
	std::mutex		mtx_seqnum_map;
	StorageRegistry		registry;
	void			respond(shared_ptr<HttpServer::Response>, const string&);
	void			respond(shared_ptr<HttpServer::Response>, SimpleWeb::StatusCode, const string&);
	void			internalError(shared_ptr<HttpServer::Response>, const exception&);
	void			mapError(string&, PLUGIN_ERROR *);
	StreamHandler		*streamHandler;
	StoragePerformanceMonitor
				*m_perfMonitor;
	// Work queue state shared between the HTTP threads and the worker pool
	std::mutex		m_queueMutex;
	std::condition_variable	m_queueCV;
	std::queue<StorageOperation *>
				m_queue;
	std::vector<std::thread *>
				m_workers;
	unsigned int		m_workerPoolSize;
	bool			m_shutdown;
};

/**
 * StoragePerformanceMonitor is a derived class from PerformanceMonitor
 * It allows direct writing of monitoring data to database
 */
class StoragePerformanceMonitor : public PerformanceMonitor {
	public:
		// Constructor with StorageApi pointer passed (also calling parent PerformanceMonitor constructor)
		StoragePerformanceMonitor(const std::string& name, StorageApi *api) :
			PerformanceMonitor(name, NULL), m_name(name), m_instance(api)
		{
		};
		// Direct write to storage of monitor data
		// NOTE(review): assumes getStoragePlugin() returns non-NULL by the
		// time monitor data is written — confirm against service startup order
		void writeData(const std::string& table, const InsertValues& values)
		{
			m_instance->getStoragePlugin()->commonInsert(table, values.toJSON());
		}
	private:
		std::string	m_name;
		StorageApi	*m_instance;
};
#endif
================================================
FILE: C/services/storage/include/storage_plugin.h
================================================
#ifndef _STORAGE_PLUGIN
#define _STORAGE_PLUGIN
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include <plugin.h>
#include <plugin_manager.h>
#include <string>
#include <reading_stream.h>
#include <plugin_configuration.h>

// Flags passed to the plugin purge entry point
#define STORAGE_PURGE_RETAIN_ANY	0x0001U
#define STORAGE_PURGE_RETAIN_ALL	0x0002U
#define STORAGE_PURGE_SIZE		0x0004U

/**
 * Class that represents a storage plugin.
 *
 * The purpose of this class is to hide the use of the pointers into the
 * dynamically loaded plugin and wrap the interface into a class that
 * can be used directly in the storage subsystem.
 *
 * This is achieved by having a set of private member variables which are
 * the pointers to the functions in the plugin, and a set of public methods
 * that will call these functions via the function pointers.
 */
class StoragePlugin : public Plugin {

public:
	StoragePlugin(const std::string& name, PLUGIN_HANDLE handle);
	~StoragePlugin();

	// Generic table operations; schema defaults to the plugin's default schema
	int		commonInsert(const std::string& table, const std::string& payload, const char *schema = nullptr);
	char		*commonRetrieve(const std::string& table, const std::string& payload, const char *schema = nullptr);
	int		commonUpdate(const std::string& table, const std::string& payload, const char *schema = nullptr);
	int		commonDelete(const std::string& table, const std::string& payload, const char *schema = nullptr);
	// Readings operations
	int		readingsAppend(const std::string& payload);
	char		*readingsFetch(unsigned long id, unsigned int blksize);
	char		*readingsRetrieve(const std::string& payload);
	char		*readingsPurge(unsigned long age, unsigned int flags, unsigned long sent);
	long		*readingsPurge();
	char		*readingsPurgeAsset(const std::string& asset);
	// Release a buffer previously returned by one of the retrieve/fetch calls
	void		release(const char *response);
	// Table snapshot operations
	int		createTableSnapshot(const std::string& table, const std::string& id);
	int		loadTableSnapshot(const std::string& table, const std::string& id);
	int		deleteTableSnapshot(const std::string& table, const std::string& id);
	char		*getTableSnapshots(const std::string& table);
	PLUGIN_ERROR	*lastError();
	// True if the loaded plugin exports the reading stream entry point
	bool		hasStreamSupport() { return readingStreamPtr != NULL; };
	int		readingStream(ReadingStream **stream, bool commit);
	bool		pluginShutdown();
	int		createSchema(const std::string& payload);
	StoragePluginConfiguration
			*getConfig() { return m_config; };
	const std::string
			&getName() { return m_name; };

private:
	PLUGIN_HANDLE	instance;
	// Function pointers resolved from the dynamically loaded plugin;
	// nullptr when the plugin does not export the corresponding symbol
	int		(*commonInsertPtr)(PLUGIN_HANDLE, const char *, const char *) = nullptr;
	char		*(*commonRetrievePtr)(PLUGIN_HANDLE, const char *, const char *) = nullptr;
	int		(*commonUpdatePtr)(PLUGIN_HANDLE, const char *, const char *) = nullptr;
	int		(*commonDeletePtr)(PLUGIN_HANDLE, const char *, const char *) = nullptr;
	// Schema-aware variants take an extra schema name argument
	int		(*storageSchemaInsertPtr)(PLUGIN_HANDLE, const char *, const char *, const char*) = nullptr;
	char		*(*storageSchemaRetrievePtr)(PLUGIN_HANDLE, const char *, const char *, const char*) = nullptr;
	int		(*storageSchemaUpdatePtr)(PLUGIN_HANDLE, const char *, const char *, const char*) = nullptr;
	int		(*storageSchemaDeletePtr)(PLUGIN_HANDLE, const char *, const char *, const char*) = nullptr;
	int		(*readingsAppendPtr)(PLUGIN_HANDLE, const char *);
	char		*(*readingsFetchPtr)(PLUGIN_HANDLE, unsigned long id, unsigned int blksize);
	char		*(*readingsRetrievePtr)(PLUGIN_HANDLE, const char *payload);
	char		*(*readingsPurgePtr)(PLUGIN_HANDLE, unsigned long age, unsigned int flags, unsigned long sent);
	unsigned int	(*readingsPurgeAssetPtr)(PLUGIN_HANDLE, const char *asset);
	void		(*releasePtr)(PLUGIN_HANDLE, const char *payload);
	int		(*createTableSnapshotPtr)(PLUGIN_HANDLE, const char *, const char *);
	int		(*loadTableSnapshotPtr)(PLUGIN_HANDLE, const char *, const char *);
	int		(*deleteTableSnapshotPtr)(PLUGIN_HANDLE, const char *, const char *);
	char		*(*getTableSnapshotsPtr)(PLUGIN_HANDLE, const char *);
	int		(*readingStreamPtr)(PLUGIN_HANDLE, ReadingStream **, bool);
	PLUGIN_ERROR	*(*lastErrorPtr)(PLUGIN_HANDLE);
	bool		(*pluginShutdownPtr)(PLUGIN_HANDLE);
	int		(*createSchemaPtr)(PLUGIN_HANDLE, const char*);
	std::string	m_name;
	StoragePluginConfiguration
			*m_config;
	bool		m_bStorageSchemaFlag = false;	// True when the schema-aware entry points are available
};
#endif

================================================
FILE: C/services/storage/include/storage_registry.h
================================================
#ifndef _STORAGE_REGISTRY_H
#define _STORAGE_REGISTRY_H
#include <vector>
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <thread>
#include <map>

/**
 * The number of refused connections required before a registration
 * is removed. Connection refusal is a result of the service that had
 * registered failing.
 */
#define MAX_REFUSALS	3

// Registered asset interests: pairs of (asset code, callback URL)
typedef std::vector<std::pair<std::string *, std::string *> > REGISTRY;

// A registration of interest in changes to a table
typedef struct {
	std::string	url;		// Callback URL to notify
	std::string	key;		// Key column to filter on
	std::vector<std::string>
			keyValues;	// Key values of interest
	std::string	operation;	// Table operation of interest
} TableRegistration;

typedef std::vector<std::pair<std::string *, TableRegistration *> > REGISTRY_TABLE;

/**
 * StorageRegistry - a class that manages requests from other microservices
 * to register interest in new readings being inserted into the storage layer
 * that match a given asset code, or any asset code "*".
 */
class StorageRegistry {
	public:
		StorageRegistry();
		~StorageRegistry();
		void	registerAsset(const std::string& asset, const std::string& url);
		void	unregisterAsset(const std::string& asset, const std::string& url);
		// Queue a readings payload for asynchronous delivery to interested parties
		void	process(const std::string& payload);
		void	processTableInsert(const std::string& tableName, const std::string& payload);
		void	processTableUpdate(const std::string& tableName, const std::string& payload);
		void	processTableDelete(const std::string& tableName, const std::string& payload);
		void	registerTable(const std::string& table, const std::string& url);
		void	unregisterTable(const std::string& table, const std::string& url);
		// Body of the background delivery thread
		void	run();
	private:
		void	processPayload(char *payload);
		void	sendPayload(const std::string& url, const char *payload);
		void	filterPayload(const std::string& url, char *payload, const std::string& asset);
		void	processInsert(char *tableName, char *payload);
		void	processUpdate(char *tableName, char *payload);
		void	processDelete(char *tableName, char *payload);
		TableRegistration*
			parseTableSubscriptionPayload(const std::string& payload);
		// Remove registrations whose target service has refused MAX_REFUSALS connections
		void	processAssetRefusals();
		void	processTableRefusals();
		void	insertTestTableReg();
		void	removeTestTableReg(int n);
		typedef std::pair<time_t, char *>		Item;
		typedef std::tuple<time_t, char *, char *>	TableItem;
		REGISTRY	m_registrations;
		REGISTRY_TABLE	m_tableRegistrations;
		// Queues of pending notifications, drained by the background thread
		std::queue<StorageRegistry::Item>	m_queue;
		std::queue<StorageRegistry::TableItem>	m_tableInsertQueue;
		std::queue<StorageRegistry::TableItem>	m_tableUpdateQueue;
		std::queue<StorageRegistry::TableItem>	m_tableDeleteQueue;
		std::mutex	m_qMutex;
		std::mutex	m_registrationsMutex;
		std::mutex	m_tableRegistrationsMutex;
		std::thread	*m_thread;
		std::condition_variable	m_cv;
		std::mutex	m_cvMutex;
		bool		m_running;
		// Count of connection refusals per registered URL
		std::map<const std::string, int>	m_refusals;
};
#endif

================================================
FILE: C/services/storage/include/storage_service.h
================================================
#ifndef _STORAGE_SERVICE_H
#define _STORAGE_SERVICE_H
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <storage_api.h>
#include <logger.h>
#include <configuration.h>
#include <storage_plugin.h>
#include <plugin_configuration.h>
#include <service_handler.h>

#define SERVICE_NAME "Fledge Storage"

/**
 * The StorageService class. This class is the core
 * of the service that offers access to the Fledge
 * storage layer. It maintains the API and provides
 * the hooks for incoming management API requests.
 */
class StorageService : public ServiceHandler {
	public:
		StorageService(const string& name);
		~StorageService();
		void 	start(std::string& coreAddress, unsigned short corePort);
		void 	stop();
		void	shutdown();
		void	restart();
		bool	isRunning() { return !m_shutdown; };
		// ServiceHandler hooks for configuration management
		void	configChange(const std::string&, const std::string&);
		void	configChildCreate(const std::string&, const std::string&, const std::string&){};
		void	configChildDelete(const std::string& , const std::string&){};
		string	getPluginName();
		string	getPluginManagedStatus();
		string	getReadingPluginName();
		void	setLogLevel(std::string level) { m_logLevel = level; };
	private:
		const string&	m_name;
		bool		loadPlugin();
		StorageApi	*api;
		StorageConfiguration
				*config;
		Logger		*logger;
		StoragePlugin	*storagePlugin;	// Plugin used for common tables
		StoragePlugin	*readingPlugin;	// Optional separate plugin for readings
		bool		m_shutdown;
		bool		m_requestRestart;	// Set when a restart rather than shutdown was requested
		std::string	m_logLevel;
		long		m_timeout;	// HTTP request timeout passed to the API server
};
#endif

================================================
FILE: C/services/storage/include/storage_stats.h
================================================
#ifndef _STORAGE_STATS_H
#define _STORAGE_STATS_H
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <json_provider.h>
#include <string>

/**
 * Counters of storage API calls, serialisable to JSON via the
 * JSONProvider interface.
 */
class StorageStats : public JSONProvider {
	public:
		StorageStats();
		void		asJSON(std::string &) const;
		unsigned int	commonInsert;
		unsigned int	commonSimpleQuery;
		unsigned int	commonQuery;
		unsigned int	commonUpdate;
		unsigned int	commonDelete;
		unsigned int	readingAppend;
		unsigned int	readingFetch;
		unsigned int	readingQuery;
		unsigned int	readingPurge;
};
#endif

================================================
FILE: C/services/storage/include/stream_handler.h
================================================
#ifndef _STREAM_HANDLER_H
#define _STREAM_HANDLER_H
/*
 * Fledge storage service.
 *
 * Copyright (c) 2019 Dianomic Systems Inc.
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <thread>
#include <mutex>
#include <condition_variable>
#include <vector>
#include <map>
#include <sys/epoll.h>
#include <reading_stream.h>
#include <string>

#define MAX_EVENTS	40	// Number of epoll events in one epoll_wait call
#define RDS_BLOCK	10000	// Number of readings to insert in each call to the storage plugin
#define BLOCK_POOL_SIZES 512	// Increments of block sizes in a block pool

class StorageApi;

/**
 * Handler for the binary reading-stream protocol. Owns a set of Stream
 * instances and a thread that multiplexes them via epoll.
 */
class StreamHandler {
	public:
		StreamHandler(StorageApi *);
		~StreamHandler();
		// Body of the epoll handler thread
		void		handler();
		// Create a new stream, returning the port and writing a token for the peer
		uint32_t	createStream(uint32_t *token);
	private:
		// A single inbound reading stream connection
		class Stream {
			public:
				Stream();
				~Stream();
				uint32_t	create(int epollfd, uint32_t *token);
				void		handleEvent(int epollfd, StorageApi *api, uint32_t events);
			private:
				/**
				 * A simple memory pool we use to store the messages we receive.
				 * We use this rather than malloc because it let's us avoid the overhead of
				 * the more complex heap management and also because it means we avoid
				 * taking out a process wide mutex.
				 */
				class MemoryPool {
					public:
						MemoryPool(size_t blkIncr) : m_blkIncr(blkIncr) {};
						~MemoryPool();
						void	*allocate(size_t size);
						void	release(void *handle);
					private:
						// Round a request up to the next pool block increment
						size_t	rndSize(size_t size)
							{
								return m_blkIncr * ((size + m_blkIncr - 1) / m_blkIncr);
							};
						void	createPool(size_t size);
						void	growPool(std::vector<void *>*, size_t);
						size_t	m_blkIncr;
						// Free lists keyed by rounded block size
						std::map<size_t, std::vector<void *>* >
							m_pool;
				};
				void		setNonBlocking(int fd);
				unsigned int	available(int fd);
				void		queueInsert(StorageApi *api, unsigned int nReadings, bool commit);
				void		dump(int n);
				// Connection lifecycle state
				enum { Closed, Listen, AwaitingToken, Connected }
						m_status;
				int		m_socket;
				uint16_t	m_port;
				uint32_t	m_token;
				uint32_t	m_blockNo;
				// Where we are within the wire protocol for the current block
				enum { BlkHdr, RdHdr, RdBody }
						m_protocolState;
				uint32_t	m_readingNo;
				uint32_t	m_blockSize;
				size_t		m_readingSize;
				struct epoll_event
						m_event;
				ReadingStream	*m_readings[RDS_BLOCK+1];
				ReadingStream	*m_currentReading;
				MemoryPool	*m_blockPool;
				std::string	m_lastAsset;
				bool		m_sameAsset;
		};
		StorageApi	*m_api;
		std::thread	m_handlerThread;
		int		m_tokens;
		std::condition_variable	m_streamsCV;
		std::mutex	m_streamsMutex;
		std::vector<Stream *>	m_streams;
		bool		m_running;
		int		m_pollfd;	// epoll instance shared by all streams
};
#endif

================================================
FILE: C/services/storage/pluginconfiguration.cpp
================================================
/*
 * Fledge storage service.
 *
 * Copyright (c) 2020 Diamonic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <plugin_configuration.h>
#include <rapidjson/document.h>
#include <rapidjson/istreamwrapper.h>
#include <rapidjson/ostreamwrapper.h>
#include <rapidjson/error/en.h>
#include <rapidjson/writer.h>
#include <fstream>
#include <iostream>
#include <unistd.h>
#include <plugin_api.h>
#include <storage_plugin.h>

using namespace std;
using namespace rapidjson;

/**
 * Constructor for storage service configuration class.
 */
StoragePluginConfiguration::StoragePluginConfiguration(const string& name, StoragePlugin *plugin) : m_name(name), m_plugin(plugin)
{
	// The plugin supplies its default configuration via the plugin info block
	m_defaultConfiguration = plugin->getInfo()->config;
	m_logger = Logger::getLogger();
	m_document = new Document();
	m_category = m_name;
	// Load any cached configuration from disk, then merge in any new
	// items the plugin's defaults define that the cache lacks
	readCache();
	updateCache();
}

/**
 * Return if a value exists for the cached configuration category
 */
bool StoragePluginConfiguration::hasValue(const string& key)
{
	if (m_document->HasParseError())
	{
		m_logger->error("Default configuration failed to parse. %s at %d",
				GetParseError_En(m_document->GetParseError()),
				m_document->GetErrorOffset());
		return false;
	}
	if (!m_document->HasMember(key.c_str()))
		return false;
	return true;
}

/**
 * Return a value from the cached configuration category
 *
 * Returns NULL (0) when the document failed to parse or the key is absent.
 */
const char *StoragePluginConfiguration::getValue(const string& key)
{
	if (m_document->HasParseError())
	{
		m_logger->error("Default configuration failed to parse. %s at %d",
				GetParseError_En(m_document->GetParseError()),
				m_document->GetErrorOffset());
		return 0;
	}
	if (!m_document->HasMember(key.c_str()))
		return 0;
	Value& item = (*m_document)[key.c_str()];
	return item["value"].GetString();
}

/**
 * Set the value of a configuration item
 *
 * NOTE(review): rapidjson's operator[] does not throw for missing keys, so
 * the catch(...) may not give the protection it appears to — verify callers
 * only pass existing keys.
 */
bool StoragePluginConfiguration::setValue(const string& key, const string& value)
{
	try {
		Value& item = (*m_document)[key.c_str()];
		const char *cstr = value.c_str();
		item["value"].SetString(cstr, strlen(cstr), m_document->GetAllocator());
		return true;
	} catch (...) {
		return false;
	}
}

/**
 * Called when the configuration category is updated.
 */
void StoragePluginConfiguration::updateCategory(const string& json)
{
	m_logger->info("New storage configuration %s", json.c_str());
	Document *newdoc = new Document();
	newdoc->Parse(json.c_str());
	if (newdoc->HasParseError())
	{
		// Keep the previous document on parse failure
		m_logger->error("New configuration failed to parse. %s at %d",
				GetParseError_En(newdoc->GetParseError()),
				newdoc->GetErrorOffset());
		delete newdoc;
	}
	else
	{
		delete m_document;
		m_document = newdoc;
		// Persist the accepted configuration
		writeCache();
	}
}

/**
 * Read the cache JSON for the configuration category from the cache file
 * into memory.
 */
void StoragePluginConfiguration::readCache()
{
	string cachefile;

	getConfigCache(cachefile);
	if (access(cachefile.c_str(), F_OK ) != 0)
	{
		// No cache on disk yet: seed it from the plugin's default configuration
		m_logger->info("Storage cache %s unreadable, using default configuration: %s.",
				cachefile.c_str(), m_defaultConfiguration.c_str());
		ConfigCategory confCategory("tmp", m_defaultConfiguration.c_str());
		confCategory.setItemsValueFromDefault();
		m_document->Parse(confCategory.itemsToJSON().c_str());
		if (m_document->HasParseError())
		{
			m_logger->error("Default configuration failed to parse. %s at %d",
					GetParseError_En(m_document->GetParseError()),
					m_document->GetErrorOffset());
		}
		writeCache();
		return;
	}
	try {
		ifstream ifs(cachefile);
		IStreamWrapper isw(ifs);
		m_document->ParseStream(isw);
		if (m_document->HasParseError())
		{
			m_logger->error("Default configuration failed to parse. %s at %d",
					GetParseError_En(m_document->GetParseError()),
					m_document->GetErrorOffset());
		}
	} catch (exception& ex) {
		m_logger->error("Configuration cache failed to read %s.", ex.what());
	}
}

/**
 * Write the configuration cache to disk
 */
void StoragePluginConfiguration::writeCache()
{
	string cachefile;

	getConfigCache(cachefile);
	ofstream ofs(cachefile);
	OStreamWrapper osw(ofs);
	Writer<OStreamWrapper> writer(osw);
	m_document->Accept(writer);
}

/**
 * Retrieve the location of the configuration cache to use
 *
 * If a configuration cache exists in the current directory then it is used
 *
 * If not and the environment variable FLEDGE_DATA exists then the
 * configuration file under etc in that directory will be used.
 *
 * If that does not exist and the environment variable FLEDGE_ROOT
 * exists then a configuration file under etc in that directory is used
 */
void StoragePluginConfiguration::getConfigCache(string& cache)
{
	char buf[512], *basedir;

	// Current working directory takes precedence
	snprintf(buf, sizeof(buf), "%s.json", m_name.c_str());
	if (access(buf, F_OK) == 0)
	{
		cache = buf;
		return;
	}
	// NOTE(review): FLEDGE_ROOT is only consulted when FLEDGE_DATA is unset
	// (else-if chain) — confirm that is the intended precedence
	if ((basedir = getenv("FLEDGE_DATA")) != NULL)
	{
		snprintf(buf, sizeof(buf), "%s/etc/%s.json", basedir, m_name.c_str());
		if (access(buf, F_OK) == 0)
		{
			cache = buf;
			return;
		}
	}
	else if ((basedir = getenv("FLEDGE_ROOT")) != NULL)
	{
		snprintf(buf, sizeof(buf), "%s/data/etc/%s.json", basedir, m_name.c_str());
		if (access(buf, F_OK) == 0)
		{
			cache = buf;
			return;
		}
	}
	else
	{
		snprintf(buf, sizeof(buf), "%s.json", m_name.c_str());
	}

	// No configuration cache has been found - return the default location
	cache = buf;
}

/**
 * Return the default category to register with the core. This allows
 * the storage configuration to appear in the UI
 *
 * @return DefaultConfigCategory* The default configuration category
 */
DefaultConfigCategory *StoragePluginConfiguration::getDefaultCategory()
{
	// Serialise the in-memory document and wrap it in a category object
	StringBuffer buffer;
	Writer<StringBuffer> writer(buffer);
	m_document->Accept(writer);
	const char *config = buffer.GetString();
	return new DefaultConfigCategory(m_category, config);
}

/**
 * Return the category to register with the core. This allows
 * the storage configuration to appear in the UI
 *
 * @return ConfigCategory* The default configuration category
 */
ConfigCategory *StoragePluginConfiguration::getConfiguration()
{
	StringBuffer buffer;
	Writer<StringBuffer> writer(buffer);
	m_document->Accept(writer);
	const char *config = buffer.GetString();
	return new ConfigCategory(m_category, config);
}

/**
 * Update the cache with any new items found in the configuration returned
 * by the plugin
 */
void StoragePluginConfiguration::updateCache()
{
	Document d;
	d.Parse(m_defaultConfiguration.c_str());
	if (d.HasParseError())
	{
		m_logger->error("Configuration returned by plugin_init has parse errors");
	}
	for (auto &item : d.GetObject())
	{
		string itemName = item.name.GetString();
		if (m_document->HasMember(itemName.c_str()))
		{
			// Item already cached - keep the cached value
		}
		else
		{
			// New item introduced by the plugin defaults: deep-copy it into
			// the cached document using the document's allocator
			Value v;
			v.CopyFrom(d[itemName.c_str()], m_document->GetAllocator());
			Value name;
			name.SetString(itemName.c_str(), itemName.length(), m_document->GetAllocator());
			m_document->AddMember(name, v, m_document->GetAllocator());
		}
	}
}


================================================
FILE: C/services/storage/storage
================================================


================================================
FILE: C/services/storage/storage.cpp
================================================
/*
 * Fledge storage service.
 *
 * Copyright (c) 2017 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <storage_service.h>
#include <configuration.h>
#include <management_api.h>
#include <management_client.h>
#include <service_record.h>
#include <plugin_manager.h>
#include <plugin_api.h>
#include <plugin.h>
#include <logger.h>
#include <iostream>
#include <string>
#include <signal.h>
#include <execinfo.h>
#include <dlfcn.h>
#include <cxxabi.h>
#include <syslog.h>
#include <config_handler.h>
#include <plugin_configuration.h>

#define NO_EXIT_STACKTRACE 	0	// Set to 1 to make storage loop after stacktrace
					// This is useful to be able to attach a debugger

#define SERVICE_TYPE "Storage"

extern int makeDaemon(void);

using namespace std;

/**
 * Signal handler to log stack traces on fatal signals
 */
static void handler(int sig)
{
Logger	*logger = Logger::getLogger();
void	*array[20];
char	buf[1024];
int	size;

	// get void*'s for all entries on the stack
	size = backtrace(array, 20);

	// print out all the frames to stderr
	logger->fatal("Signal %d (%s) trapped:\n", sig, strsignal(sig));
	char **messages = backtrace_symbols(array, size);
	// Walk the frames, demangling C++ symbol names where possible
	for (int i = 0; i < size; i++)
	{
		Dl_info info;
		if (dladdr(array[i], &info) && info.dli_sname)
		{
			char *demangled = NULL;
			int status = -1;
			if (info.dli_sname[0] == '_')
				demangled = abi::__cxa_demangle(info.dli_sname, NULL, 0, &status);
			snprintf(buf, sizeof(buf), "%-3d %*p %s + %zd---------",
					i, int(2 + sizeof(void*) * 2), array[i],
					status == 0 ? demangled :
					info.dli_sname == 0 ?
messages[i] : info.dli_sname, (char *)array[i] - (char *)info.dli_saddr); free(demangled); } else { snprintf(buf, sizeof(buf), "%-3d %*p %s---------", i, int(2 + sizeof(void*) * 2), array[i], messages[i]); } logger->fatal("(%d) %s", i, buf); } free(messages); #if NO_EXIT_STACKTRACE while (1) { sleep(100); } #endif exit(1); } // Displays service information in JSON format static void printServiceInfoAsJSON() { static std::string serviceInfoJSON = R"({"name":"Storage Service","description":"Service buffers data within a single instance","type":")" + std::string(SERVICE_TYPE) + R"(","process":"storage","process_script":"[\"services/storage\"]"})"; std::cout << serviceInfoJSON << std::endl; } /** * Storage service main entry point */ int main(int argc, char *argv[]) { unsigned short corePort = 8082; string coreAddress = "localhost"; bool daemonMode = true; string myName = SERVICE_NAME; bool returnPlugin = false; bool returnReadingsPlugin = false; string logLevel = "warning"; for (int i = 1; i < argc; i++) { if (!strcmp(argv[i], "--info")) { printServiceInfoAsJSON(); return 0; } if (!strcmp(argv[i], "-d")) { daemonMode = false; } else if (!strncmp(argv[i], "--port=", 7)) { corePort = (unsigned short)atoi(&argv[i][7]); } else if (!strncmp(argv[i], "--name=", 7)) { myName = &argv[i][7]; } else if (!strncmp(argv[i], "--address=", 10)) { coreAddress = &argv[i][10]; } else if (!strncmp(argv[i], "--plugin", 8)) { returnPlugin = true; } else if (!strncmp(argv[i], "--readingsplugin", 8)) { returnReadingsPlugin = true; } else if (!strncmp(argv[i], "--logLevel=", 11)) { logLevel = &argv[i][11]; } } #ifdef PROFILING char profilePath[200]{0}; if (getenv("FLEDGE_DATA")) { snprintf(profilePath, sizeof(profilePath), "%s/%s_Profile", getenv("FLEDGE_DATA"), myName.c_str()); } else if (getenv("FLEDGE_ROOT")) { snprintf(profilePath, sizeof(profilePath), "%s/data/%s_Profile", getenv("FLEDGE_ROOT"), myName.c_str()); } else { snprintf(profilePath, sizeof(profilePath), 
"/usr/local/fledge/data/%s_Profile", myName.c_str()); } mkdir(profilePath, 0777); chdir(profilePath); #endif if (returnPlugin == false && returnReadingsPlugin == false && daemonMode && makeDaemon() == -1) { // Failed to run in daemon mode cout << "Failed to run as deamon - proceeding in interactive mode." << endl; } if (returnPlugin && returnReadingsPlugin) { cout << "You can not specify --plugin and --readingsplugin together"; exit(1); } StorageService service(myName); service.setLogLevel(logLevel); Logger::getLogger()->setMinLevel(logLevel); if (returnPlugin) { cout << service.getPluginName() << " " << service.getPluginManagedStatus() << endl; } else if (returnReadingsPlugin) { cout << service.getReadingPluginName() << " " << service.getPluginManagedStatus() << endl; } else { service.start(coreAddress, corePort); } return 0; } /** * Detach the process from the terminal and run in the background. */ int makeDaemon() { pid_t pid; int logmask = setlogmask(0); /* create new process */ if ((pid = fork() ) == -1) { return -1; } else if (pid != 0) { exit (EXIT_SUCCESS); } // If we got here we are a child process // create new session and process group if (setsid() == -1) { return -1; } setlogmask(logmask); // Close stdin, stdout and stderr close(0); close(1); close(2); // redirect fd's 0,1,2 to /dev/null (void)open("/dev/null", O_RDWR); // stdin if (dup(0) == -1) {} // stdout Workaround GCC bug 66425 produces warning if (dup(0) == -1) {} // stderr Workaround GCC bug 66425 produces warning return 0; } /** * Constructor for the storage service */ StorageService::StorageService(const string& myName) : m_name(myName), readingPlugin(NULL), m_shutdown(false), m_requestRestart(false) { unsigned short servicePort; logger = new Logger(myName); // Do this first to make sure we have the right logger config = new StorageConfiguration(); signal(SIGSEGV, handler); signal(SIGILL, handler); signal(SIGBUS, handler); signal(SIGFPE, handler); signal(SIGABRT, handler); if 
	   (config->getValue("port") == NULL)
	{
		servicePort = 0;	// default to a dynamic port
	}
	else
	{
		servicePort = (unsigned short)atoi(config->getValue("port"));
	}
	unsigned int threads = 1;
	if (config->hasValue("threads"))
	{
		threads = (unsigned int)atoi(config->getValue("threads"));
	}
	unsigned int workerPoolSize = 5;
	if (config->hasValue("workerPool"))
	{
		workerPoolSize = (unsigned int)atoi(config->getValue("workerPool"));
	}
	if (config->hasValue("logLevel"))
	{
		m_logLevel = config->getValue("logLevel");
	}
	else
	{
		m_logLevel = "warning";
	}
	logger->setMinLevel(m_logLevel);
	if (config->hasValue("timeout"))
	{
		m_timeout = strtol(config->getValue("timeout"), NULL, 10);
	}
	else
	{
		m_timeout = 5;
	}
	api = new StorageApi(servicePort, threads, workerPoolSize);
	api->setTimeout(m_timeout);
}

/**
 * Storage Service destructor
 */
StorageService::~StorageService()
{
	delete api;
	delete config;
	delete logger;
}

/**
 * Start the storage service
 */
void StorageService::start(string& coreAddress, unsigned short corePort)
{
	if (!loadPlugin())
	{
		logger->fatal("Failed to load storage plugin.");
		return;
	}
	unsigned short managementPort = (unsigned short)0;
	if (config->getValue("managementPort"))
	{
		managementPort = (unsigned short)atoi(config->getValue("managementPort"));
	}
	ManagementApi management(SERVICE_NAME, managementPort);	// Start management API
	api->initResources();
	logger->info("Starting service...");
	api->start();
	management.registerService(this);
	management.start();

	// Allow time for the listeners to start before we register
	sleep(1);
	if (! m_shutdown)
	{
		// Now register our service
		// TODO proper hostname lookup
		unsigned short listenerPort = api->getListenerPort();
		unsigned short managementListener = management.getListenerPort();
		ServiceRecord record(m_name, SERVICE_TYPE, "http", "localhost", listenerPort, managementListener);
		ManagementClient *client = new ManagementClient(coreAddress, corePort);
		client->registerService(record);

		// FOGL-7074 upgrade step
		try {
			ConfigCategory cat = client->getCategory("Storage");
			string rp = cat.getValue("readingPlugin");
			if (rp.empty())
			{
				client->setCategoryItemValue("Storage", "readingPlugin", "Use main plugin");
			}
		} catch (...) {
			// ignore
		}

		// Add the default configuration under the Advanced category
		unsigned int retryCount = 0;
		DefaultConfigCategory *conf = config->getDefaultCategory();
		conf->setDescription(CATEGORY_DESCRIPTION);
		// Retry with backoff; the core may not yet be ready to accept us
		while (client->addCategory(*conf, false) == false && ++retryCount < 10)
		{
			sleep(2 * retryCount);
		}
		delete conf;
		vector<string> children1;
		children1.push_back(STORAGE_CATEGORY);
		ConfigCategories categories = client->getCategories();
		try {
			bool found = false;
			for (unsigned int idx = 0; idx < categories.length(); idx++)
			{
				if (categories[idx]->getName().compare(ADVANCED) == 0)
				{
					client->addChildCategories(ADVANCED, children1);
					found = true;
				}
			}
			if (!found)
			{
				// Advanced category does not exist yet - create it first
				DefaultConfigCategory advanced(ADVANCED, "{}");
				advanced.setDescription(ADVANCED);
				if (client->addCategory(advanced, true))
				{
					client->addChildCategories(ADVANCED, children1);
				}
			}
		} catch (...)
{ } // Register for configuration changes to our category ConfigHandler *configHandler = ConfigHandler::getInstance(client); configHandler->registerCategory(this, STORAGE_CATEGORY); StoragePluginConfiguration *storagePluginConfig = storagePlugin->getConfig(); if (storagePluginConfig != NULL) { DefaultConfigCategory *conf = storagePluginConfig->getDefaultCategory(); conf->setDescription("Storage Plugin"); while (client->addCategory(*conf, true) == false && ++retryCount < 10) { sleep(2 * retryCount); } vector<string> children1; children1.push_back(conf->getName()); client->addChildCategories(STORAGE_CATEGORY, children1); // Register for configuration changes to our storage plugin category ConfigHandler *configHandler = ConfigHandler::getInstance(client); configHandler->registerCategory(this, conf->getName()); delete conf; } if (readingPlugin) { StoragePluginConfiguration *storagePluginConfig = readingPlugin->getConfig(); if (storagePluginConfig != NULL) { StoragePluginConfiguration *storagePluginConfig = readingPlugin->getConfig(); if (storagePluginConfig != NULL) { DefaultConfigCategory *conf = storagePluginConfig->getDefaultCategory(); conf->setDescription("Reading Plugin"); while (client->addCategory(*conf, true) == false && ++retryCount < 10) { sleep(2 * retryCount); } vector<string> children1; children1.push_back(conf->getName()); client->addChildCategories(STORAGE_CATEGORY, children1); // Regsiter for configuration changes to our reading category category ConfigHandler *configHandler = ConfigHandler::getInstance(client); configHandler->registerCategory(this, conf->getName()); } } } // Now we are running force the plugin names back to the configuration manager to // make sure they match what we are running. 
This can be out of sync if the storage // configuration cache has been manually reset or altered while Fledge was down client->setCategoryItemValue(STORAGE_CATEGORY, "plugin", config->getValue("plugin")); client->setCategoryItemValue(STORAGE_CATEGORY, "readingPlugin", config->getValue("readingPlugin")); // Check whether to enable storage performance monitor if (config->hasValue("perfmon")) { string perf = config->getValue("perfmon"); if (perf.compare("true") == 0) { api->getPerformanceMonitor()->setCollecting(true); } else { api->getPerformanceMonitor()->setCollecting(false); } } // Wait for all the API threads to complete api->wait(); if (readingPlugin) readingPlugin->pluginShutdown(); readingPlugin = NULL; if (storagePlugin) storagePlugin->pluginShutdown(); storagePlugin = NULL; // Clean shutdown, unregister the storage service if (m_requestRestart) client->restartService(); else client->unregisterService(); } else { api->wait(); } management.stop(); logger->info("Storage service shut down."); } /** * Stop the storage service/ */ void StorageService::stop() { logger->info("Stopping service...\n"); } /** * Load the configured storage plugin or plugins * * @return bool True if the plugins have been loaded and support the correct operations */ bool StorageService::loadPlugin() { PluginManager *manager = PluginManager::getInstance(); manager->setPluginType(PLUGIN_TYPE_ID_STORAGE); const char *plugin = config->getValue("plugin"); if (plugin == NULL) { logger->error("Unable to fetch plugin name from configuration.\n"); return false; } logger->info("Load storage plugin %s.", plugin); PLUGIN_HANDLE handle; string pname = plugin; if ((handle = manager->loadPlugin(pname, PLUGIN_TYPE_STORAGE)) != NULL) { storagePlugin = new StoragePlugin(pname, handle); if ((storagePlugin->getInfo()->options & SP_COMMON) == 0) { logger->error("Defined storage plugin %s does not support common table operations.\n", plugin); return false; } if (config->hasValue("raedingPlugin") == false && 
(storagePlugin->getInfo()->options & SP_READINGS) == 0) { logger->error("Defined storage plugin %s does not support readings operations.\n", plugin); return false; } api->setPlugin(storagePlugin); logger->info("Loaded storage plugin %s.", plugin); } else { return false; } if (! config->hasValue("readingPlugin")) { // Single plugin does everything return true; } const char *readingPluginName = config->getValue("readingPlugin"); if (! *readingPluginName) { // Single plugin does everything return true; } if (strcmp(readingPluginName, plugin) == 0 || strcmp(readingPluginName, "Use main plugin") == 0) { // Storage plugin and reading plugin are the same, or we have been // explicitly told to use the storage plugin for reading so no need // to add a reading plugin return true; } if (plugin == NULL) { logger->error("Unable to fetch reading plugin name from configuration.\n"); return false; } logger->info("Load reading plugin %s.", readingPluginName); string rpname = readingPluginName; if ((handle = manager->loadPlugin(rpname, PLUGIN_TYPE_STORAGE)) != NULL) { readingPlugin = new StoragePlugin(rpname, handle); if ((storagePlugin->getInfo()->options & SP_READINGS) == 0) { logger->error("Defined readings storage plugin %s does not support readings operations.\n", readingPluginName); return false; } api->setReadingPlugin(readingPlugin); logger->info("Loaded reading plugin %s.", readingPluginName); } else { return false; } return true; } /** * Shutdown request */ void StorageService::shutdown() { /* Stop recieving new requests and allow existing * requests to drain. */ m_shutdown = true; logger->info("Storage service shutdown in progress."); api->stopServer(); } /** * Restart request */ void StorageService::restart() { /* Stop recieving new requests and allow existing * requests to drain. 
*/ m_shutdown = true; logger->info("Storage service shutdown in progress."); api->stopServer(); } /** * Configuration change notification */ void StorageService::configChange(const string& categoryName, const string& category) { logger->info("Configuration category change '%s'", categoryName.c_str()); if (!categoryName.compare(STORAGE_CATEGORY)) { config->updateCategory(category); if (m_logLevel.compare(config->getValue("logLevel"))) { m_logLevel = config->getValue("logLevel"); logger->setMinLevel(m_logLevel); } if (config->hasValue("timeout")) { long timeout = strtol(config->getValue("timeout"), NULL, 10); if (timeout != m_timeout) { api->setTimeout(timeout); m_timeout = timeout; } } if (config->hasValue("perfmon")) { string perf = config->getValue("perfmon"); if (perf.compare("true") == 0) { api->getPerformanceMonitor()->setCollecting(true); } else { api->getPerformanceMonitor()->setCollecting(false); } } return; } if (!categoryName.compare(getPluginName())) { storagePlugin->getConfig()->updateCategory(category); return; } if (config->hasValue("readingPlugin")) { const char *readingPluginName = config->getValue("readingPlugin"); if (!categoryName.compare(readingPluginName)) { readingPlugin->getConfig()->updateCategory(category); } } } /** * Return the name of the configured storage service */ string StorageService::getPluginName() { return string(config->getValue("plugin")); } /** * Return the managed status of the storage plugin */ string StorageService::getPluginManagedStatus() { return string(config->getValue("managedStatus")); } /** * Return the name of the configured reading plugin */ string StorageService::getReadingPluginName() { string rval = config->getValue("readingPlugin"); if (rval.empty()) { rval = config->getValue("plugin"); } return rval; } ================================================ FILE: C/services/storage/storage_api.cpp ================================================ /* * Fledge storage service. 
 *
 * Copyright (c) 2017-2018 OSisoft, LLC
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch, Massimiliano Pinto
 */
#include "client_http.hpp"
#include "server_http.hpp"
#include "storage_api.h"
#include "storage_stats.h"
#include "management_api.h"
#include "logger.h"
#include "plugin_exception.h"
#include <rapidjson/document.h>
#include <atomic>
// Added for the default_resource example
#include <algorithm>
#include <fstream>
#include <vector>
#ifdef HAVE_OPENSSL
#include "crypto.hpp"
#endif
#include <string_utils.h>

#define WORKER_THREAD_POOL 1	// Enable worker threads for readings append and fetch
#define WORKER_THREADS 1
// Threshold for logging number of threads in use for some "readings" wrappers
#define MAX_WORKER_THREADS 5

/**
 * Definition of the Storage Service REST API
 */

StorageApi *StorageApi::m_instance = 0;

using namespace std;
using namespace rapidjson;
using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>;
using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>;

/**
 * The following are a set of wrapper C functions that are registered with the HTTP Server
 * for each of the API entry points. These must be outside of a class as the library has no
 * mechanism to have a class instance and hence can not provide a "this" pointer for the callback.
 *
 * These functions do the minimum work needed to find the singleton instance of the StorageAPI
 * class and call the appropriate method of that class to do the actual work.
 */

/**
 * Wrapper function for the common insert API call.
 */
void commonInsertWrapper(shared_ptr<HttpServer::Response> response,
			 shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->commonInsert(response, request);
}

/**
 * Wrapper function for the common update API call.
 */
void commonUpdateWrapper(shared_ptr<HttpServer::Response> response,
			 shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->commonUpdate(response, request);
}

/**
 * Wrapper function for the common delete API call.
 */
void commonDeleteWrapper(shared_ptr<HttpServer::Response> response,
			 shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->commonDelete(response, request);
}

/**
 * Wrapper function for the common simple query API call.
 */
void commonSimpleQueryWrapper(shared_ptr<HttpServer::Response> response,
			      shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->commonSimpleQuery(response, request);
}

/**
 * Wrapper function for the common query API call.
 */
void commonQueryWrapper(shared_ptr<HttpServer::Response> response,
			shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->commonQuery(response, request);
}

/**
 * Wrapper function for the default resource API call. This is called whenever
 * an unrecognised API call is received.
 */
void defaultWrapper(shared_ptr<HttpServer::Response> response,
		    shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->defaultResource(response, request);
}

/**
 * Called when an error occurs
 */
void on_error(__attribute__((unused)) shared_ptr<HttpServer::Request> request,
	      __attribute__((unused)) const SimpleWeb::error_code &ec)
{
}

/**
 * Wrapper function for the reading append API call.
 */
void readingAppendWrapper(shared_ptr<HttpServer::Response> response,
			  shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
#if WORKER_THREAD_POOL
	// Queue the operation for the worker thread pool rather than
	// servicing it on the HTTP server thread
	api->queue(StorageOperation::ReadingAppend, request, response);
#elif WORKER_THREADS
	std::atomic<int>* cnt = &(api->m_workers_count);

	// Check current number of workers and log if threshold value is hit
	int tVal = std::atomic_load(cnt);
	if (tVal >= MAX_WORKER_THREADS)
	{
		Logger::getLogger()->warn("Storage API: readingAppend() is being run by a new thread. "
					"Current worker threads count %d exceeds the warning limit of %d allowed threads hit.",
					tVal,
					MAX_WORKER_THREADS);
	}

	// Start a new thread
	thread work_thread([api, cnt, response, request] {
			// Increase count
			std::atomic_fetch_add(cnt, 1);

			api->readingAppend(response, request);

			// Decrease counter
			std::atomic_fetch_sub(cnt, 1);
		});

	// Detach the new thread
	work_thread.detach();
#else
	api->readingAppend(response, request);
#endif
}

/**
 * Wrapper function for the reading fetch API call.
 */
void readingFetchWrapper(shared_ptr<HttpServer::Response> response,
			 shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
#if WORKER_THREAD_POOL
	// Queue the operation for the worker thread pool
	api->queue(StorageOperation::ReadingFetch, request, response);
#elif WORKER_THREADS
	std::atomic<int>* cnt = &(api->m_workers_count);

	// Check current number of workers and log if threshold value is hit
	int tVal = std::atomic_load(cnt);
	if (tVal >= MAX_WORKER_THREADS)
	{
		Logger::getLogger()->warn("Storage API: readingFetch() is being run by a new thread. "
					"Current worker threads count %d exceeds the warning limit of %d allowed threads hit.",
					tVal,
					MAX_WORKER_THREADS);
	}

	// Start a new thread
	thread work_thread([api, cnt, response, request] {
			// Increase count
			std::atomic_fetch_add(cnt, 1);

			api->readingFetch(response, request);

			// Decrease counter
			std::atomic_fetch_sub(cnt, 1);
		});

	// Detach the new thread
	work_thread.detach();
#else
	api->readingFetch(response, request);
#endif
}

/**
 * Wrapper function for the reading query API call.
 */
void readingQueryWrapper(shared_ptr<HttpServer::Response> response,
			 shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
#if WORKER_THREAD_POOL
	// Queue the operation for the worker thread pool
	api->queue(StorageOperation::ReadingQuery, request, response);
#else
	api->readingQuery(response, request);
#endif
}

/**
 * Wrapper function for the reading purge API call.
 */
void readingPurgeWrapper(shared_ptr<HttpServer::Response> response,
			 shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
#if WORKER_THREAD_POOL
	// Queue the operation for the worker thread pool
	api->queue(StorageOperation::ReadingPurge, request, response);
#elif WORKER_THREADS
	std::atomic<int>* cnt = &(api->m_workers_count);

	// Check current number of workers and log if threshold value is hit
	int tVal = std::atomic_load(cnt);
	if (tVal >= MAX_WORKER_THREADS)
	{
		Logger::getLogger()->warn("Storage API: readingPurge() is being run by a new thread. "
					"Current worker threads count %d exceeds the warning limit of %d allowed threads hit.",
					tVal,
					MAX_WORKER_THREADS);
	}

	// Start a new thread
	thread work_thread([api, cnt, response, request] {
			// Increase count
			std::atomic_fetch_add(cnt, 1);

			api->readingPurge(response, request);

			// Decrease counter
			std::atomic_fetch_sub(cnt, 1);
		});

	// Detach the new thread
	work_thread.detach();
#else
	api->readingPurge(response, request);
#endif
}

/**
 * Wrapper function for the reading interest register API call.
 */
void readingRegisterWrapper(shared_ptr<HttpServer::Response> response,
			    shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->readingRegister(response, request);
}

/**
 * Wrapper function for the reading interest unregister API call.
 */
void readingUnregisterWrapper(shared_ptr<HttpServer::Response> response,
			      shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->readingUnregister(response, request);
}

/**
 * Wrapper function for the table interest register API call.
 */
void tableRegisterWrapper(shared_ptr<HttpServer::Response> response,
			  shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->tableRegister(response, request);
}

/**
 * Wrapper function for the table interest unregister API call.
 */
void tableUnregisterWrapper(shared_ptr<HttpServer::Response> response,
			    shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->tableUnregister(response, request);
}

/**
 * Wrapper function for the create snapshot API call.
 */
void createTableSnapshotWrapper(shared_ptr<HttpServer::Response> response,
				shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->createTableSnapshot(response, request);
}

/**
 * Wrapper function for the load snapshot API call.
 */
void loadTableSnapshotWrapper(shared_ptr<HttpServer::Response> response,
			      shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->loadTableSnapshot(response, request);
}

/**
 * Wrapper function for the delete snapshot API call.
 */
void deleteTableSnapshotWrapper(shared_ptr<HttpServer::Response> response,
				shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->deleteTableSnapshot(response, request);
}

/**
 * Wrapper function for the get table snapshots API call.
 */
void getTableSnapshotsWrapper(shared_ptr<HttpServer::Response> response,
			      shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->getTableSnapshots(response, request);
}

/**
 * Wrapper function for the create storage stream API call.
 */
void createStorageStreamWrapper(shared_ptr<HttpServer::Response> response,
				shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->createStorageStream(response, request);
}

/**
 * Wrapper function for the create storage schema API call.
 */
void createStorageSchemaWrapper(shared_ptr<HttpServer::Response> response,
				shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->createStorageSchema(response, request);
}

/**
 * Wrapper function for the insert into storage table API call.
 */
void storageTableInsertWrapper(shared_ptr<HttpServer::Response> response,
			       shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->storageTableInsert(response, request);
}

/**
 * Wrapper function for the simple query in storage table API call.
 */
void storageTableSimpleQueryWrapper(shared_ptr<HttpServer::Response> response,
				    shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->storageTableSimpleQuery(response, request);
}

/**
 * Wrapper function for the update into storage table API call.
 */
void storageTableUpdateWrapper(shared_ptr<HttpServer::Response> response,
			       shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->storageTableUpdate(response, request);
}

/**
 * Wrapper function for the delete from storage table API call.
 */
void storageTableDeleteWrapper(shared_ptr<HttpServer::Response> response,
			       shared_ptr<HttpServer::Request> request)
{
	StorageApi *api = StorageApi::getInstance();
	api->storageTableDelete(response, request);
}

/**
 * Wrapper function for the query on the storage table API call.
*/ void storageTableQueryWrapper(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { StorageApi *api = StorageApi::getInstance(); api->storageTableQuery(response, request); } /** * Construct the singleton Storage API */ StorageApi::StorageApi(const unsigned short port, const unsigned int threads, const unsigned int poolSize) : m_thread(NULL), readingPlugin(0), streamHandler(0) { m_port = port; m_threads = threads; m_server = new HttpServer(); m_server->config.port = port; m_server->config.thread_pool_size = threads; m_server->config.timeout_request = 60; m_perfMonitor = NULL; m_workerPoolSize = poolSize; m_workers.resize(poolSize, NULL); StorageApi::m_instance = this; } /** * Destructor for the storage API class. There is only ever one StorageApi class * in existance and it lives for the entire duration of the storage service, so this * is really for completeness rather than any pracitical use. */ StorageApi::~StorageApi() { if (m_server) { delete m_server; } m_instance = NULL; if (m_thread) { delete m_thread; } if (m_perfMonitor) { delete m_perfMonitor; } for (unsigned int i = 0; i < m_workerPoolSize; i++) { if (m_workers[i]) delete m_workers[i]; } } /** * Return the singleton instance of the StorageAPI class */ StorageApi *StorageApi::getInstance() { if (m_instance == NULL) { Logger::getLogger()->warn("Creating a default storage API instance, tuning parameters will be ignored"); m_instance = new StorageApi(0, 1, 5); } return m_instance; } /** * Return the current listener port */ unsigned short StorageApi::getListenerPort() { return m_server->getLocalPort(); } /** * Initialise the API entry points for the common data resource and * the readings resource. 
 */
void StorageApi::initResources()
{
	// Initialise workers threads counter
	m_workers_count = ATOMIC_VAR_INIT(0);

	// Initialise the API entry points
	m_server->resource[COMMON_ACCESS]["POST"] = commonInsertWrapper;
	m_server->resource[COMMON_ACCESS]["GET"] = commonSimpleQueryWrapper;
	m_server->resource[COMMON_QUERY]["PUT"] = commonQueryWrapper;
	m_server->resource[COMMON_ACCESS]["PUT"] = commonUpdateWrapper;
	m_server->resource[COMMON_ACCESS]["DELETE"] = commonDeleteWrapper;
	m_server->default_resource["POST"] = defaultWrapper;
	m_server->default_resource["PUT"] = defaultWrapper;
	m_server->default_resource["GET"] = defaultWrapper;
	m_server->default_resource["DELETE"] = defaultWrapper;
	m_server->resource[READING_INTEREST]["POST"] = readingRegisterWrapper;
	m_server->resource[READING_INTEREST]["DELETE"] = readingUnregisterWrapper;
	m_server->resource[TABLE_INTEREST]["POST"] = tableRegisterWrapper;
	m_server->resource[TABLE_INTEREST]["DELETE"] = tableUnregisterWrapper;
	m_server->resource[CREATE_TABLE_SNAPSHOT]["POST"] = createTableSnapshotWrapper;
	m_server->resource[LOAD_TABLE_SNAPSHOT]["PUT"] = loadTableSnapshotWrapper;
	m_server->resource[DELETE_TABLE_SNAPSHOT]["DELETE"] = deleteTableSnapshotWrapper;
	m_server->resource[GET_TABLE_SNAPSHOTS]["GET"] = getTableSnapshotsWrapper;
	m_server->resource[READING_ACCESS]["POST"] = readingAppendWrapper;
	m_server->resource[READING_ACCESS]["GET"] = readingFetchWrapper;
	m_server->resource[READING_QUERY]["PUT"] = readingQueryWrapper;
	m_server->resource[READING_PURGE]["PUT"] = readingPurgeWrapper;
	m_server->resource[CREATE_STORAGE_STREAM]["POST"] = createStorageStreamWrapper;
	m_server->resource[STORAGE_SCHEMA]["POST"] = createStorageSchemaWrapper;
	m_server->resource[STORAGE_TABLE_ACCESS]["POST"] = storageTableInsertWrapper;
	m_server->resource[STORAGE_TABLE_ACCESS]["GET"] = storageTableSimpleQueryWrapper;
	m_server->resource[STORAGE_TABLE_ACCESS]["PUT"] = storageTableUpdateWrapper;
	m_server->resource[STORAGE_TABLE_ACCESS]["DELETE"] = storageTableDeleteWrapper;
	m_server->resource[STORAGE_TABLE_QUERY]["PUT"] = storageTableQueryWrapper;
	m_server->on_error = on_error;

	ManagementApi *management = ManagementApi::getInstance();
	management->registerStats(&stats);

	// Create StoragePerformanceMonitor object for direct monitoring data saving
	m_perfMonitor = new StoragePerformanceMonitor("Storage", this);
}

/**
 * Entry point for the thread that runs the HTTP server
 */
void startService()
{
	StorageApi::getInstance()->startServer();
}

/**
 * Static method used to start the thread
 */
static void workerStart()
{
	StorageApi *api = StorageApi::getInstance();
	api->worker();
}

/**
 * Start the HTTP server
 */
void StorageApi::start()
{
	m_thread = new thread(startService);
	m_shutdown = false;
	// Spin up the pool of worker threads that service the readings queue
	for (unsigned int i = 0; i < m_workerPoolSize; i++)
	{
		m_workers[i] = new thread(workerStart);
	}
}

void StorageApi::startServer()
{
	m_server->start();
}

void StorageApi::stopServer()
{
	m_server->stop();
}

/**
 * Wait for the HTTP server to shutdown
 */
void StorageApi::wait()
{
	// Block until the HTTP server thread exits, then wake and join the workers
	m_thread->join();
	m_shutdown = true;
	m_queueCV.notify_all();
	for (unsigned int i = 0; i < m_workerPoolSize; i++)
	{
		if (m_workers[i])
		{
			m_workers[i]->join();
			delete m_workers[i];
			m_workers[i] = NULL;
		}
	}
}

/**
 * Connect with the storage plugin
 */
void StorageApi::setPlugin(StoragePlugin *plugin)
{
	this->plugin = plugin;
}

/**
 * Connect with the readings storage plugin
 */
void StorageApi::setReadingPlugin(StoragePlugin *plugin)
{
	this->readingPlugin = plugin;
}

/**
 * Construct an HTTP response with the 200 OK return code using the payload
 * provided.
* * @param response The response stream to send the response on * @param payload The payload to send */ void StorageApi::respond(shared_ptr<HttpServer::Response> response, const string& payload) { *response << "HTTP/1.1 200 OK\r\nContent-Length: " << payload.length() << "\r\n" << "Content-type: application/json\r\n\r\n" << payload; } /** * The worker thread */ void StorageApi::worker() { unique_lock<mutex> lck(m_queueMutex); while (!m_shutdown) { while (!m_queue.empty()) { StorageOperation *op = m_queue.front(); m_queue.pop(); lck.unlock(); switch (op->m_operation) { case StorageOperation::ReadingAppend: readingAppend(op->m_response, op->m_request); break; case StorageOperation::ReadingFetch: readingFetch(op->m_response, op->m_request); break; case StorageOperation::ReadingPurge: readingPurge(op->m_response, op->m_request); break; case StorageOperation::ReadingQuery: readingQuery(op->m_response, op->m_request); break; default: Logger::getLogger()->error("Internal error, unknown operation %d requested of storage worker thread", op->m_operation); break; } delete op; lck.lock(); } m_queueCV.wait(lck); } } /** * Append a request to the readings request queue * * If the queue is starting to get long delay the return as * a primitive way to throttle incoming requests * * @param op The operation to perform * @param request The HTTP request * @param response The HTTP response */ void StorageApi::queue(StorageOperation::Operations op, shared_ptr<HttpServer::Request> request, shared_ptr<HttpServer::Response> response) { unique_lock<mutex> lck(m_queueMutex); m_queue.push(new StorageOperation(op, request, response)); m_queueCV.notify_all(); unsigned int length = m_queue.size(); m_perfMonitor->collect("Worker Queue length", length); if (length > 10) { lck.unlock(); usleep(1000 * length); if (length % 10 == 0) Logger::getLogger()->warn("Reading request queue now at %d", length); } } /** * Construct an HTTP response with the specified return code using the payload * provided. 
 *
 * @param response	The response stream to send the response on
 * @param code		The HTTP response code to send
 * @param payload	The payload to send
 */
void StorageApi::respond(shared_ptr<HttpServer::Response> response, SimpleWeb::StatusCode code, const string& payload)
{
	*response << "HTTP/1.1 " << status_code(code) << "\r\nContent-Length: " << payload.length() << "\r\n"
		<< "Content-type: application/json\r\n\r\n" << payload;
}

/**
 * Perform an insert into a table of the data provided in the payload.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::commonInsert(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
	string  tableName;
	string	payload;
	string  responsePayload;

	stats.commonInsert++;
	try {
		tableName = request->path_match[TABLE_NAME_COMPONENT];
		payload = request->content.string();

		int rval = plugin->commonInsert(tableName, payload);
		if (rval != -1)
		{
			// Notify any registered table interests of the insert
			registry.processTableInsert(tableName, payload);
			responsePayload = "{ \"response\" : \"inserted\", \"rows_affected\" : ";
			responsePayload += to_string(rval);
			responsePayload += " }";
			respond(response, responsePayload);
			if (m_perfMonitor->isCollecting())
			{
				m_perfMonitor->collect("insert rows " + tableName, rval);
				m_perfMonitor->collect("insert Payload Size " + tableName, (long)payload.length());
			}
		}
		else
		{
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform an update on a table of the data provided in the payload.
 *
 * A "SeqNum" HTTP header of the form <threadId>_<seqNum> may be sent by the
 * client; it is used to de-duplicate retried requests. The last seen sequence
 * number per thread is kept in an LRU-bounded map and a repeat/old request is
 * answered with a zero rows_affected response without touching the plugin.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::commonUpdate(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
	string  tableName;
	string	payload;
	string  responsePayload;

	auto header_seq = request->header.find("SeqNum");
	if(header_seq != request->header.end())
	{
		string threadId = header_seq->second.substr(0, header_seq->second.find("_"));
		int seqNum = stoi(header_seq->second.substr(header_seq->second.find("_")+1));

		{
			// Guard the seqnum map and its LRU list
			std::unique_lock<std::mutex> lock(mtx_seqnum_map);
			auto it = m_seqnum_map.find(threadId);
			if (it != m_seqnum_map.end())
			{
				if (seqNum <= it->second.first)
				{
					// Duplicate or stale request: reply without re-executing the update
					responsePayload = "{ \"response\" : \"updated\", \"rows_affected\" : ";
					responsePayload += to_string(0);
					responsePayload += " }";
					Logger::getLogger()->info("%s:%d: Repeat/old request: responding with zero response - threadId=%s, last seen seqNum for this threadId=%d, HTTP request header seqNum=%d",
								__FUNCTION__, __LINE__, threadId.c_str(), it->second.first, seqNum);
					respond(response, responsePayload);
					return;
				}
				// remove this threadId from LRU list; will add this to front of LRU list below
				seqnum_map_lru_list.erase(m_seqnum_map[threadId].second);
			}
			else
			{
				if (seqnum_map_lru_list.size() == max_entries_in_seqnum_map) // LRU list is full
				{
					//delete least recently used element
					string last = seqnum_map_lru_list.back();
					seqnum_map_lru_list.pop_back();
					m_seqnum_map.erase(last);
				}
			}
			// insert an entry for threadId at front of LRU queue
			seqnum_map_lru_list.push_front(threadId);
			m_seqnum_map[threadId] = make_pair(seqNum, seqnum_map_lru_list.begin());
		}
	}

	stats.commonUpdate++;
	try {
		tableName = request->path_match[TABLE_NAME_COMPONENT];
		payload = request->content.string();

		int rval = plugin->commonUpdate(tableName, payload);
		if (rval != -1)
		{
			// Notify any registered table interests of the update
			registry.processTableUpdate(tableName, payload);
			responsePayload = "{ \"response\" : \"updated\", \"rows_affected\" : ";
			responsePayload += to_string(rval);
			responsePayload += " }";
			respond(response, responsePayload);
			if (m_perfMonitor->isCollecting())
			{
				m_perfMonitor->collect("update rows " + tableName, rval);
				m_perfMonitor->collect("update Payload Size " + tableName, (long)payload.length());
			}
		}
		else
		{
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform a simple query on the table using the query parameters as conditions
 * TODO make this work for multiple column queries
 *
 * NOTE(review): with more than one query parameter the generated "where"
 * clause concatenates the conditions with no separator — see the TODO above;
 * only single-column queries are currently well formed.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::commonSimpleQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
	string	tableName;
	SimpleWeb::CaseInsensitiveMultimap query;
	string	payload;

	stats.commonSimpleQuery++;
	try {
		tableName = request->path_match[TABLE_NAME_COMPONENT];
		query = request->parse_query_string();

		if (query.size() > 0)
		{
			// Build a JSON "where" clause of column = value conditions
			payload = "{ \"where\" : { ";
			for (auto &param : query)
			{
				payload = payload + "\"column\" : \"";
				payload = payload + param.first;
				payload = payload + "\", \"condition\" : \"=\", \"value\" : \"";
				payload = payload + param.second;
				payload = payload + "\"";
			}
			payload = payload + "} }";
		}

		char *pluginResult = plugin->commonRetrieve(tableName, payload);
		if (pluginResult)
		{
			string res = pluginResult;
			respond(response, res);
			free(pluginResult);
		}
		else
		{
			string responsePayload;
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform query on a table using the JSON encoded query in the payload
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::commonQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
	string	tableName;
	string	payload;

	stats.commonQuery++;
	try {
		tableName = request->path_match[TABLE_NAME_COMPONENT];
		payload = request->content.string();

		char *pluginResult = plugin->commonRetrieve(tableName, payload);
		if (pluginResult)
		{
			string res = pluginResult;
			respond(response, res);
			free(pluginResult);
		}
		else
		{
			string responsePayload;
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform a delete on a table using the condition encoded in the JSON payload
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::commonDelete(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
	string	tableName;
	string	payload;
	string  responsePayload;

	stats.commonDelete++;
	try {
		tableName = request->path_match[TABLE_NAME_COMPONENT];
		payload = request->content.string();

		int rval = plugin->commonDelete(tableName, payload);
		if (rval != -1)
		{
			// Notify any registered table interests of the delete
			registry.processTableDelete(tableName, payload);
			responsePayload = "{ \"response\" : \"deleted\", \"rows_affected\" : ";
			responsePayload += to_string(rval);
			responsePayload += " }";
			respond(response, responsePayload);
			if (m_perfMonitor->isCollecting())
			{
				m_perfMonitor->collect("delete rows " + tableName, rval);
				m_perfMonitor->collect("delete Payload Size " + tableName, (long)payload.length());
			}
		}
		else
		{
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform an append operation on the readings.
* * @param response The response stream to send the response on * @param request The HTTP request */ void StorageApi::readingAppend(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string payload; string responsePayload; struct timeval tStart, tEnd; if (m_perfMonitor->isCollecting()) { gettimeofday(&tStart, NULL); } auto header_seq = request->header.find("SeqNum"); if(header_seq != request->header.end()) { string threadId = header_seq->second.substr(0, header_seq->second.find("_")); int seqNum = stoi(header_seq->second.substr(header_seq->second.find("_")+1)); { std::unique_lock<std::mutex> lock(mtx_seqnum_map); auto it = m_seqnum_map.find(threadId); if (it != m_seqnum_map.end()) { if (seqNum <= it->second.first) { responsePayload = "{ \"response\" : \"appended\", \"readings_added\" : "; responsePayload += to_string(0); responsePayload += " }"; Logger::getLogger()->info("%s:%d: Repeat/old request: responding with zero response - threadId=%s, last seen seqNum for this threadId=%d, HTTP request header seqNum=%d", __FUNCTION__, __LINE__, threadId.c_str(), it->second.first, seqNum); respond(response, responsePayload); return; } // remove this threadId from LRU list; will add this to front of LRU list below seqnum_map_lru_list.erase(m_seqnum_map[threadId].second); } else { if (seqnum_map_lru_list.size() == max_entries_in_seqnum_map) // LRU list is full { //delete least recently used element string last = seqnum_map_lru_list.back(); seqnum_map_lru_list.pop_back(); m_seqnum_map.erase(last); } } // insert an entry for threadId at front of LRU queue seqnum_map_lru_list.push_front(threadId); m_seqnum_map[threadId] = make_pair(seqNum, seqnum_map_lru_list.begin()); } } stats.readingAppend++; try { payload = request->content.string(); int rval = (readingPlugin ? 
readingPlugin : plugin)->readingsAppend(payload); if (rval != -1) { registry.process(payload); responsePayload = "{ \"response\" : \"appended\", \"readings_added\" : "; responsePayload += to_string(rval); responsePayload += " }"; respond(response, responsePayload); if (m_perfMonitor->isCollecting()) { gettimeofday(&tEnd, NULL); m_perfMonitor->collect("Reading Append Rows " + (readingPlugin ? readingPlugin : plugin)->getName(), rval); m_perfMonitor->collect("Reading Append PayloadSize " + (readingPlugin ? readingPlugin : plugin)->getName(), (long)payload.length()); struct timeval diff; timersub(&tEnd, &tStart, &diff); m_perfMonitor->collect("Reading Append Time (ms)", diff.tv_sec * 1000 + diff.tv_usec / 1000); } } else { mapError(responsePayload, (readingPlugin ? readingPlugin : plugin)->lastError()); respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload); } //respond(response, responsePayload); } catch (exception& ex) { internalError(response, ex); } } /** * Fetch a block of readings. 
*
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::readingFetch(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
SimpleWeb::CaseInsensitiveMultimap	query;
unsigned long				id = 0;
unsigned long				count = 0;

	stats.readingFetch++;
	try {
		query = request->parse_query_string();
		// Both "id" and "count" query parameters are mandatory
		auto search = query.find("id");
		if (search == query.end())
		{
			string payload = "{ \"error\" : \"Missing query parameter id\" }";
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, payload);
			return;
		}
		else
		{
			id = (unsigned long)atol(search->second.c_str());
		}
		search = query.find("count");
		if (search == query.end())
		{
			string payload = "{ \"error\" : \"Missing query parameter count\" }";
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, payload);
			return;
		}
		else
		{
			count = (unsigned)atol(search->second.c_str());
		}
		// Get plugin data; the reading plugin is used in preference
		// to the main storage plugin when one is configured
		char *responsePayload = (readingPlugin ? readingPlugin : plugin)->readingsFetch(id, count);
		string res = responsePayload;
		// Reply to client
		respond(response, res);
		// Free plugin data
		free(responsePayload);
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform a query on a set of readings.  The query is passed through
 * to the plugin as the raw JSON payload of the request.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::readingQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string	payload;

	stats.readingQuery++;
	try {
		payload = request->content.string();
		char *resultSet = (readingPlugin ? readingPlugin : plugin)->readingsRetrieve(payload);
		string res = resultSet;
		respond(response, res);
		// The plugin allocated the result with malloc; release it here
		free(resultSet);
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Purge the readings.  The purge may be driven by the age of the data,
 * the size of the readings storage or by asset name.  A static atomic
 * flag is used to reject a purge request while a previous one is still
 * in progress.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::readingPurge(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
SimpleWeb::CaseInsensitiveMultimap	query;
unsigned long				age = 0;
unsigned long				size = 0;
unsigned long				lastSent = 0;
unsigned int				flagsMask = 0;
string					flags;
string					asset;
bool					byAsset = false;

	// Only one purge may run at a time; the flag is static so it is
	// shared by every invocation of this handler
	static std::atomic<bool> already_running(false);
	if (already_running)
	{
		string payload = "{ \"error\" : \"Previous instance of purge is still running, not starting another one.\" }";
		respond(response, SimpleWeb::StatusCode::client_error_too_many_requests, payload);
		return;
	}
	already_running.store(true);

	stats.readingPurge++;
	try {
		query = request->parse_query_string();
		auto search = query.find("age");
		if (search != query.end())
		{
			age = (unsigned)atol(search->second.c_str());
		}
		search = query.find("size");
		if (search != query.end())
		{
			size = (unsigned)atol(search->second.c_str());
		}
		search = query.find("asset");
		if (search != query.end())
		{
			asset = search->second;
			byAsset = true;
		}
		// "sent" is mandatory unless the purge is by asset name
		search = query.find("sent");
		if (search == query.end())
		{
			if (!byAsset)
			{
				string payload = "{ \"error\" : \"Missing query parameter sent\" }";
				respond(response, SimpleWeb::StatusCode::client_error_bad_request, payload);
				already_running.store(false);
				return;
			}
		}
		else
		{
			lastSent = (unsigned)atol(search->second.c_str());
		}

		search = query.find("flags");
		if (search != query.end())
		{
			flags = search->second;
			Logger::getLogger()->debug("%s - flags :%s:", __FUNCTION__, flags.c_str());
			// TODO Turn flags into a bitmap
			if (flags.compare(PURGE_FLAG_RETAIN_ANY) == 0)
			{
				flagsMask |= STORAGE_PURGE_RETAIN_ANY;
			}
			else if ( (flags.compare(PURGE_FLAG_RETAIN) == 0) ||	// Backward compatibility
				(flags.compare(PURGE_FLAG_RETAIN_ALL) == 0) )
			{
				flagsMask |= STORAGE_PURGE_RETAIN_ALL;
			}
			else if (flags.compare(PURGE_FLAG_PURGE) == 0)
			{
				flagsMask &= (~STORAGE_PURGE_RETAIN_ANY);
				flagsMask &= (~STORAGE_PURGE_RETAIN_ALL);
			}
			Logger::getLogger()->debug("%s - flagsMask :%d:", __FUNCTION__, flagsMask);
		}

		char *purged = NULL;
		if (age)
		{
			purged = (readingPlugin ? readingPlugin : plugin)->readingsPurge(age, flagsMask, lastSent);
		}
		else if (size)
		{
			purged = (readingPlugin ? readingPlugin : plugin)->readingsPurge(size, flagsMask|STORAGE_PURGE_SIZE, lastSent);
		}
		else if (byAsset)
		{
			purged = (readingPlugin ? readingPlugin : plugin)->readingsPurgeAsset(asset);
		}
		else
		{
			string payload = "{ \"error\" : \"Must either specify age or size parameter\" }";
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, payload);
			already_running.store(false);
			return;
		}
		respond(response, purged);
		free(purged);
	}
	/** Handle PluginNotImplementedException exception here */
	catch (PluginNotImplementedException& ex)
	{
		string payload = "{ \"error\" : \"";
		payload += ex.what();
		payload += "\" }";
		/** Return HTTP code 400 with message from storage plugin */
		respond(response, SimpleWeb::StatusCode::client_error_bad_request, payload);
		already_running.store(false);
		return;
	}
	/** Handle general exception */
	catch (exception& ex)
	{
		internalError(response, ex);
		already_running.store(false);
		return;
	}
	already_running.store(false);
}

/**
 * Register interest in readings for an asset.  The payload must contain
 * a "url" element; that URL will be called back when data arrives for
 * the asset named in the request path.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::readingRegister(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string		asset;
string		payload;
Document	doc;

	payload = request->content.string();
	// URL decode asset name
	asset = urlDecode(request->path_match[ASSET_NAME_COMPONENT]);
	doc.Parse(payload.c_str());
	if (doc.HasParseError())
	{
		string resp = "{ \"error\" : \"Badly formed payload\" }";
		respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp);
	}
	else
	{
		if (doc.HasMember("url"))
		{
			registry.registerAsset(asset, doc["url"].GetString());
			string resp = " { \"" + asset + "\" : \"registered\" }";
			respond(response, resp);
		}
		else
		{
			string resp = "{ \"error\" : \"Missing url element in payload\" }";
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp);
		}
	}
}

/**
 * Unregister interest in readings for an asset.  Mirror image of
 * readingRegister above; the URL in the payload is removed from the
 * registry for the asset in the request path.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::readingUnregister(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string		asset;
string		payload;
Document	doc;

	payload = request->content.string();
	// URL decode asset name
	asset = urlDecode(request->path_match[ASSET_NAME_COMPONENT]);
	doc.Parse(payload.c_str());
	if (doc.HasParseError())
	{
		string resp = "{ \"error\" : \"Badly formed payload\" }";
		respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp);
	}
	else
	{
		if (doc.HasMember("url"))
		{
			registry.unregisterAsset(asset, doc["url"].GetString());
			string resp = " { \"" + asset + "\" : \"unregistered\" }";
			respond(response, resp);
		}
		else
		{
			string resp = "{ \"error\" : \"Missing url element in payload\" }";
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp);
		}
	}
}

/**
 * Register interest in changes to a table.  Note that, unlike asset
 * registration, the whole payload (not just the url) is passed to the
 * registry.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::tableRegister(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string		table;
string		payload;
Document	doc;

	payload = request->content.string();
	// URL decode table name
	table = urlDecode(request->path_match[TABLE_NAME_COMPONENT]);
	doc.Parse(payload.c_str());
	if (doc.HasParseError())
	{
		string resp = "{ \"error\" : \"Badly formed payload\" }";
		respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp);
	}
	else
	{
		if (doc.HasMember("url"))
		{
			registry.registerTable(table, payload);
			string resp = " { \"" + table + "\" : \"registered\" }";
			respond(response, resp);
		}
		else
		{
			string resp = "{ \"error\" : \"Missing url element in payload\" }";
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp);
		}
	}
}
/** * Unregister interest in readings for an asset */ void StorageApi::tableUnregister(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string table; string payload; Document doc; payload = request->content.string(); // URL decode table name table = urlDecode(request->path_match[TABLE_NAME_COMPONENT]); doc.Parse(payload.c_str()); if (doc.HasParseError()) { string resp = "{ \"error\" : \"Badly formed payload\" }"; respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp); } else { if (doc.HasMember("url")) { registry.unregisterTable(table, payload); string resp = " { \"" + table + "\" : \"unregistered\" }"; respond(response, resp); } else { string resp = "{ \"error\" : \"Missing url element in payload\" }"; respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp); } } } /** * Create a stream for high speed storage ingestion * * @param response The response stream to send the response on * @param request The HTTP request */ void StorageApi::createStorageStream(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string responsePayload; (void)(request); // Surpress unused arguemnt warning try { if (!streamHandler) { streamHandler = new StreamHandler(this); } uint32_t token; uint32_t port = streamHandler->createStream(&token); if (port != 0) { responsePayload = "{ \"port\":"; responsePayload += to_string(port); responsePayload += ", \"token\":"; responsePayload += to_string(token); responsePayload += " }"; respond(response, responsePayload); } else { respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload); } } catch (exception& ex) { internalError(response, ex); } } /** * Append the readings that have arrived via a stream to the storage plugin * * @param readings A Null terminated array of points to ReadingStream structures * @param commit A flag to commit the readings block */ bool StorageApi::readingStream(ReadingStream **readings, bool 
commit) { if ((readingPlugin ? readingPlugin : plugin)->hasStreamSupport()) { return (readingPlugin ? readingPlugin : plugin)->readingStream(readings, commit); } else { // Plugin does not support streaming input ostringstream convert; char ts[60], micro_s[10]; convert << "{\"readings\":["; for (int i = 0; readings[i]; i++) { if (i > 0) convert << ","; convert << "{\"asset_code\":\""; convert << readings[i]->assetCode; convert << "\",\"user_ts\":\""; struct tm timeinfo; gmtime_r(&readings[i]->userTs.tv_sec, &timeinfo); std::strftime(ts, sizeof(ts), "%Y-%m-%d %H:%M:%S", &timeinfo); snprintf(micro_s, sizeof(micro_s), ".%06lu", readings[i]->userTs.tv_usec); convert << ts << micro_s; convert << "\",\"reading\":"; convert << &(readings[i]->assetCode[readings[i]->assetCodeLength]); convert << "}"; } convert << "]}"; Logger::getLogger()->debug("Fallback created payload: %s", convert.str().c_str()); (readingPlugin ? readingPlugin : plugin)->readingsAppend(convert.str()); } return false; } /** * Handle a bad URL endpoint call */ void StorageApi::defaultResource(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string payload; payload = "{ \"error\" : \"Unsupported URL: " + request->path + "\" }"; respond(response, SimpleWeb::StatusCode::client_error_bad_request, payload); } /** * Handle a exception by sendign back an internal error */ void StorageApi::internalError(shared_ptr<HttpServer::Response> response, const exception& ex) { string payload = "{ \"Exception\" : \""; payload = payload + string(ex.what()); payload = payload + "\""; Logger *logger = Logger::getLogger(); logger->error("StorgeApi Internal Error: %s\n", ex.what()); respond(response, SimpleWeb::StatusCode::server_error_internal_server_error, payload); } void StorageApi::mapError(string& payload, PLUGIN_ERROR *lastError) { char *ptr, *ptr1, *buf = new char[strlen(lastError->message) * 2 + 1]; ptr = buf; ptr1 = lastError->message; while (*ptr1) { if (*ptr1 == '"') *ptr++ = 
'\\'; *ptr++ = *ptr1++; if (*ptr1 == '\n') ptr1++; } *ptr = 0; payload = "{ \"entryPoint\" : \""; payload = payload + lastError->entryPoint; payload = payload + "\", \"message\" : \""; payload = payload + buf; payload = payload + "\", \"retryable\" : "; payload = payload + (lastError->retryable ? "true" : "false"); payload = payload + "}"; delete[] buf; } /** * Create a table snapshot */ void StorageApi::createTableSnapshot(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string sTable; string payload; Document doc; payload = request->content.string(); sTable = request->path_match[TABLE_NAME_COMPONENT]; doc.Parse(payload.c_str()); if (!doc.HasMember("id")) { string resp = "{ \"error\" : \"Missing id element in payload for create snapshot\" }"; respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp); return; } string responsePayload; string sId = doc["id"].GetString(); // call plugin method if (plugin->createTableSnapshot(sTable, sId) < 0) { mapError(responsePayload, plugin->lastError()); respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload); } else { responsePayload = "{\"created\": {\"id\": \"" + sId; responsePayload += "\", \"table\": \"" + sTable + "\"} }"; respond(response, responsePayload); } } /** * Load a table snapshot */ void StorageApi::loadTableSnapshot(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string sId; string sTable; string payload; payload = request->content.string(); sTable = request->path_match[TABLE_NAME_COMPONENT]; sId = request->path_match[SNAPSHOT_ID_COMPONENT]; if (sId.empty()) { string resp = "{ \"error\" : \"Missing id element in payload for load snapshot\" }"; respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp); return; } string responsePayload; if (plugin->loadTableSnapshot(sTable, sId) < 0) { mapError(responsePayload, plugin->lastError()); respond(response, 
SimpleWeb::StatusCode::client_error_bad_request, responsePayload); } else { responsePayload = "{\"loaded\": {\"id\": \"" + sId; responsePayload += "\", \"table\": \"" + sTable + "\"} }"; respond(response, responsePayload); } } /** * Delete a table snapshot */ void StorageApi::deleteTableSnapshot(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string sId; string sTable; string payload; payload = request->content.string(); sTable = request->path_match[TABLE_NAME_COMPONENT]; sId = request->path_match[SNAPSHOT_ID_COMPONENT]; if (sId.empty()) { string resp = "{ \"error\" : \"Missing id element in payload fopr delete snapshot\" }"; respond(response, SimpleWeb::StatusCode::client_error_bad_request, resp); return; } string responsePayload; if (plugin->deleteTableSnapshot(sTable, sId) < 0) { mapError(responsePayload, plugin->lastError()); respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload); } else { responsePayload = "{\"deleted\": {\"id\": \"" + sId; responsePayload += "\", \"table\": \"" + sTable + "\"} }"; respond(response, responsePayload); } } /** * Get list of a table snapshots */ void StorageApi::getTableSnapshots(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { string sTable; string payload; try { payload = request->content.string(); sTable = request->path_match[TABLE_NAME_COMPONENT]; // Get plugin data char* pluginResult = plugin->getTableSnapshots(sTable); if (pluginResult) { string res = pluginResult; respond(response, res); free(pluginResult); } else { string responsePayload; mapError(responsePayload, plugin->lastError()); respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload); } } catch (exception& ex) { internalError(response, ex); } } /** * Perform an create table and create index for schema provided in the payload. 
*
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::createStorageSchema(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string	payload;
string	responsePayload;

	try {
		payload = request->content.string();
		int rval = plugin->createSchema(payload);
		if (rval != -1)
		{
			// NOTE(review): this response is not valid JSON (a bare
			// string inside braces) - confirm whether any caller
			// parses it before changing the format
			responsePayload = "{ \"Successfully created schema\"} ";
			respond(response, responsePayload);
		}
		else
		{
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform an insert into a table in a named storage schema.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::storageTableInsert(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string	schemaName;
string	tableName;
string	payload;
string	responsePayload;

	stats.commonInsert++;
	try {
		schemaName = request->path_match[STORAGE_SCHEMA_NAME_COMPONENT];
		tableName = request->path_match[STORAGE_TABLE_NAME_COMPONENT];
		payload = request->content.string();
		int rval = plugin->commonInsert(tableName, payload, const_cast<char*>(schemaName.c_str()));
		if (rval != -1)
		{
			if (m_perfMonitor->isCollecting())
			{
				m_perfMonitor->collect("insert rows " + tableName, rval);
				m_perfMonitor->collect("insert Payload Size " + tableName, (long)payload.length());
			}
			// Notify any registered interest in this table
			registry.processTableInsert(tableName, payload);
			responsePayload = "{ \"response\" : \"inserted\", \"rows_affected\" : ";
			responsePayload += to_string(rval);
			responsePayload += " }";
			respond(response, responsePayload);
		}
		else
		{
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform an update on a table of the data provided in the payload.
 *
 * An optional "SeqNum" HTTP header of the form "<threadId>_<seqNum>" is
 * used to de-duplicate retried requests: an LRU-bounded map records the
 * highest sequence number seen per thread id, and a request whose
 * sequence number is not greater than the recorded one is answered with
 * a zero rows_affected response without touching the plugin.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::storageTableUpdate(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string	schemaName;
string	tableName;
string	payload;
string	responsePayload;

	auto header_seq = request->header.find("SeqNum");
	if(header_seq != request->header.end())
	{
		// Header value is "<threadId>_<seqNum>"
		string threadId = header_seq->second.substr(0, header_seq->second.find("_"));
		int seqNum = stoi(header_seq->second.substr(header_seq->second.find("_")+1));
		{
			std::unique_lock<std::mutex> lock(mtx_seqnum_map);
			auto it = m_seqnum_map.find(threadId);
			if (it != m_seqnum_map.end())
			{
				if (seqNum <= it->second.first)
				{
					// Duplicate/old request: reply without performing the update
					responsePayload = "{ \"response\" : \"updated\", \"rows_affected\" : ";
					responsePayload += to_string(0);
					responsePayload += " }";
					Logger::getLogger()->info("%s:%d: Repeat/old request: responding with zero response - threadId=%s, last seen seqNum for this threadId=%d, HTTP request header seqNum=%d", __FUNCTION__, __LINE__, threadId.c_str(), it->second.first, seqNum);
					respond(response, responsePayload);
					return;
				}
				// remove this threadId from LRU list; will add this to front of LRU list below
				seqnum_map_lru_list.erase(m_seqnum_map[threadId].second);
			}
			else
			{
				if (seqnum_map_lru_list.size() == max_entries_in_seqnum_map) // LRU list is full
				{
					//delete least recently used element
					string last = seqnum_map_lru_list.back();
					seqnum_map_lru_list.pop_back();
					m_seqnum_map.erase(last);
				}
			}
			// insert an entry for threadId at front of LRU queue
			seqnum_map_lru_list.push_front(threadId);
			m_seqnum_map[threadId] = make_pair(seqNum, seqnum_map_lru_list.begin());
		}
	}

	stats.commonUpdate++;
	try {
		schemaName = request->path_match[STORAGE_SCHEMA_NAME_COMPONENT];
		tableName = request->path_match[STORAGE_TABLE_NAME_COMPONENT];
		payload = request->content.string();
		int rval = plugin->commonUpdate(tableName, payload, const_cast<char*>(schemaName.c_str()));
		if (rval != -1)
		{
			if (m_perfMonitor->isCollecting())
			{
				m_perfMonitor->collect("update rows " + tableName, rval);
				m_perfMonitor->collect("update Payload Size " + tableName, (long)payload.length());
			}
			// Notify any registered interest in this table
			registry.processTableUpdate(tableName, payload);
			responsePayload = "{ \"response\" : \"updated\", \"rows_affected\" : ";
			responsePayload += to_string(rval);
			responsePayload += " }";
			respond(response, responsePayload);
		}
		else
		{
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform a delete on a table using the condition encoded in the JSON payload
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::storageTableDelete(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string	schemaName;
string	tableName;
string	payload;
string	responsePayload;

	stats.commonDelete++;
	try {
		schemaName = request->path_match[STORAGE_SCHEMA_NAME_COMPONENT];
		tableName = request->path_match[STORAGE_TABLE_NAME_COMPONENT];
		payload = request->content.string();
		int rval = plugin->commonDelete(tableName, payload, const_cast<char*>(schemaName.c_str()));
		if (rval != -1)
		{
			if (m_perfMonitor->isCollecting())
			{
				m_perfMonitor->collect("delete rows " + tableName, rval);
				m_perfMonitor->collect("delete Payload Size " + tableName, (long)payload.length());
			}
			// Notify any registered interest in this table
			registry.processTableDelete(tableName, payload);
			responsePayload = "{ \"response\" : \"deleted\", \"rows_affected\" : ";
			responsePayload += to_string(rval);
			responsePayload += " }";
			respond(response, responsePayload);
		}
		else
		{
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	}catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform a simple query on the table using the query parameters as conditions
 * TODO make this work for multiple column queries
 * NOTE(review): with more than one query parameter this builds a where
 * clause with repeated "column" keys and no separators - the TODO above
 * stands; only single-parameter queries produce valid JSON.
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::storageTableSimpleQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string					schemaName;
string					tableName;
SimpleWeb::CaseInsensitiveMultimap	query;
string					payload;

	stats.commonSimpleQuery++;
	try {
		schemaName = request->path_match[STORAGE_SCHEMA_NAME_COMPONENT];
		tableName = request->path_match[STORAGE_TABLE_NAME_COMPONENT];
		query = request->parse_query_string();
		if (query.size() > 0)
		{
			// Build a where clause,  "param = value", from the query string
			payload = "{ \"where\" : { ";
			for(auto &param : query)
			{
				payload = payload + "\"column\" : \"";
				payload = payload + param.first;
				payload = payload + "\", \"condition\" : \"=\", \"value\" : \"";
				payload = payload + param.second;
				payload = payload + "\"";
			}
			payload = payload + "} }";
		}
		char *pluginResult = plugin->commonRetrieve(tableName, payload, const_cast<char*>(schemaName.c_str()));
		if (pluginResult)
		{
			string res = pluginResult;
			respond(response, res);
			free(pluginResult);
		}
		else
		{
			string responsePayload;
			mapError(responsePayload, plugin->lastError());
			respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload);
		}
	} catch (exception& ex) {
		internalError(response, ex);
	}
}

/**
 * Perform query on a table using the JSON encoded query in the payload
 *
 * @param response	The response stream to send the response on
 * @param request	The HTTP request
 */
void StorageApi::storageTableQuery(shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request)
{
string	schemaName;
string	tableName;
string	payload;

	stats.commonQuery++;
	try {
		schemaName = request->path_match[STORAGE_SCHEMA_NAME_COMPONENT];
		tableName = request->path_match[STORAGE_TABLE_NAME_COMPONENT];
		payload = request->content.string();
		char *pluginResult = plugin->commonRetrieve(tableName, payload, const_cast<char*>(schemaName.c_str()));
		if (pluginResult)
		{
			string res = pluginResult;
			respond(response, res);
free(pluginResult); } else { string responsePayload; mapError(responsePayload, plugin->lastError()); respond(response, SimpleWeb::StatusCode::client_error_bad_request, responsePayload); } } catch (exception& ex) { internalError(response, ex); } } ================================================ FILE: C/services/storage/storage_plugin.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <config_category.h> #include <storage_plugin.h> #include <plugin_exception.h> using namespace std; #define DEFAULT_SCHEMA "fledge" /** * Constructor for the class that wraps the storage plugin * * Create a set of function points that resolve to the loaded plugin and * enclose in the class. */ StoragePlugin::StoragePlugin(const string& name, PLUGIN_HANDLE handle) : Plugin(handle), m_name(name), m_config(NULL) { // Call the init method of the plugin string version = this->getInfo()->interface; int major = strtol(version.c_str(), NULL, 10); size_t offset = version.find("."); int minor = 0; if (offset != string::npos) { minor = strtol(version.substr(offset + 1).c_str(), NULL, 10); } if (major > 1 || minor > 3) // Configuration starts at 1.4.0 of the interface { m_config = new StoragePluginConfiguration(name, this); PLUGIN_HANDLE (*pluginInit)(ConfigCategory *) = (PLUGIN_HANDLE (*)(ConfigCategory *)) manager->resolveSymbol(handle, "plugin_init"); ConfigCategory *config = m_config->getConfiguration(); instance = (*pluginInit)(config); delete config; } else { PLUGIN_HANDLE (*pluginInit)() = (PLUGIN_HANDLE (*)()) manager->resolveSymbol(handle, "plugin_init"); instance = (*pluginInit)(); } if (major >= 1 && minor >= 5) { m_bStorageSchemaFlag = true; } // Setup the function pointers to the plugin if (!m_bStorageSchemaFlag) { commonInsertPtr = (int (*)(PLUGIN_HANDLE, const char*, const char*)) manager->resolveSymbol(handle, "plugin_common_insert"); } 
else { storageSchemaInsertPtr = (int (*)(PLUGIN_HANDLE, const char*, const char*, const char*)) manager->resolveSymbol(handle, "plugin_common_insert"); } if (!m_bStorageSchemaFlag) { commonRetrievePtr = (char * (*)(PLUGIN_HANDLE, const char*, const char*)) manager->resolveSymbol(handle, "plugin_common_retrieve"); } else { storageSchemaRetrievePtr = (char * (*)(PLUGIN_HANDLE, const char*, const char*, const char*)) manager->resolveSymbol(handle, "plugin_common_retrieve"); } if (!m_bStorageSchemaFlag) { commonUpdatePtr = (int (*)(PLUGIN_HANDLE, const char*, const char*)) manager->resolveSymbol(handle, "plugin_common_update"); } else { storageSchemaUpdatePtr = (int (*)(PLUGIN_HANDLE, const char*, const char*, const char*)) manager->resolveSymbol(handle, "plugin_common_update"); } if (!m_bStorageSchemaFlag) { commonDeletePtr = (int (*)(PLUGIN_HANDLE, const char*, const char*)) manager->resolveSymbol(handle, "plugin_common_delete"); } else { storageSchemaDeletePtr = (int (*)(PLUGIN_HANDLE, const char*, const char*, const char*)) manager->resolveSymbol(handle, "plugin_common_delete"); } readingsAppendPtr = (int (*)(PLUGIN_HANDLE, const char *)) manager->resolveSymbol(handle, "plugin_reading_append"); readingsFetchPtr = (char * (*)(PLUGIN_HANDLE, unsigned long id, unsigned int blksize)) manager->resolveSymbol(handle, "plugin_reading_fetch"); readingsRetrievePtr = (char * (*)(PLUGIN_HANDLE, const char *)) manager->resolveSymbol(handle, "plugin_reading_retrieve"); readingsPurgePtr = (char * (*)(PLUGIN_HANDLE, unsigned long age, unsigned int flags, unsigned long sent)) manager->resolveSymbol(handle, "plugin_reading_purge"); readingsPurgeAssetPtr = (unsigned int (*)(PLUGIN_HANDLE, const char *)) manager->resolveSymbol(handle, "plugin_reading_purge_asset"); releasePtr = (void (*)(PLUGIN_HANDLE, const char *)) manager->resolveSymbol(handle, "plugin_release"); lastErrorPtr = (PLUGIN_ERROR * (*)(PLUGIN_HANDLE)) manager->resolveSymbol(handle, "plugin_last_error"); 
createTableSnapshotPtr = (int (*)(PLUGIN_HANDLE, const char*, const char*)) manager->resolveSymbol(handle, "plugin_create_table_snapshot"); loadTableSnapshotPtr = (int (*)(PLUGIN_HANDLE, const char*, const char*)) manager->resolveSymbol(handle, "plugin_load_table_snapshot"); deleteTableSnapshotPtr = (int (*)(PLUGIN_HANDLE, const char*, const char*)) manager->resolveSymbol(handle, "plugin_delete_table_snapshot"); getTableSnapshotsPtr = (char * (*)(PLUGIN_HANDLE, const char*)) manager->resolveSymbol(handle, "plugin_get_table_snapshots"); readingStreamPtr = (int (*)(PLUGIN_HANDLE, ReadingStream **, bool)) manager->resolveSymbol(handle, "plugin_readingStream"); pluginShutdownPtr = (bool (*)(PLUGIN_HANDLE))manager->resolveSymbol(handle, "plugin_shutdown"); createSchemaPtr = (int (*)(PLUGIN_HANDLE, const char*)) manager->resolveSymbol(handle, "plugin_createSchema"); } /** * Call the insert method in the plugin */ int StoragePlugin::commonInsert(const string& table, const string& payload, const char *schema) { if(!m_bStorageSchemaFlag && this->commonInsertPtr) { return this->commonInsertPtr(instance, table.c_str(), payload.c_str()); } else { if (this->storageSchemaInsertPtr) return this->storageSchemaInsertPtr(instance, schema ? schema : DEFAULT_SCHEMA, table.c_str(), payload.c_str()); } return 0; } /** * Call the retrieve method in the plugin */ char *StoragePlugin::commonRetrieve(const string& table, const string& payload, const char *schema) { if (!m_bStorageSchemaFlag && this->commonRetrievePtr) { return this->commonRetrievePtr(instance, table.c_str(), payload.c_str()); } else { if (this->storageSchemaRetrievePtr) return this->storageSchemaRetrievePtr(instance, schema ? 
schema : DEFAULT_SCHEMA, table.c_str(), payload.c_str());
	}
	return NULL;
}

/**
 * Call the update method in the plugin, routing to the schema-aware
 * entry point when the plugin supports storage schemas.
 *
 * @param table		The name of the table to update
 * @param payload	The JSON update payload
 * @param schema	The schema to use, NULL implies the default schema
 */
int StoragePlugin::commonUpdate(const string& table, const string& payload, const char *schema)
{
	if (!m_bStorageSchemaFlag && this->commonUpdatePtr)
	{
		return this->commonUpdatePtr(instance, table.c_str(), payload.c_str());
	}
	else
	{
		if (this->storageSchemaUpdatePtr)
			return this->storageSchemaUpdatePtr(instance, schema ? schema : DEFAULT_SCHEMA, table.c_str(), payload.c_str());
	}
	return 0;
}

/**
 * Call the delete method in the plugin, routing to the schema-aware
 * entry point when the plugin supports storage schemas.
 *
 * @param table		The name of the table to delete from
 * @param payload	The JSON delete payload
 * @param schema	The schema to use, NULL implies the default schema
 */
int StoragePlugin::commonDelete(const string& table, const string& payload, const char *schema)
{
	if (!m_bStorageSchemaFlag && this->commonDeletePtr)
	{
		return this->commonDeletePtr(instance, table.c_str(), payload.c_str());
	}
	else
	{
		if (this->storageSchemaDeletePtr)
			return this->storageSchemaDeletePtr(instance, schema ? schema : DEFAULT_SCHEMA, table.c_str(), payload.c_str());
	}
	return 0;
}

/**
 * Call the readings append method in the plugin
 *
 * NOTE(review): unlike the common table wrappers above, the function
 * pointer is not checked for NULL before the call - confirm the symbol
 * is mandatory for all storage plugins.
 */
int StoragePlugin::readingsAppend(const string& payload)
{
	return this->readingsAppendPtr(instance, payload.c_str());
}

/**
 * Call the readings fetch method in the plugin
 */
char * StoragePlugin::readingsFetch(unsigned long id, unsigned int blksize)
{
	return this->readingsFetchPtr(instance, id, blksize);
}

/**
 * Call the readings retrieve method in the plugin
 */
char *StoragePlugin::readingsRetrieve(const string& payload)
{
	return this->readingsRetrievePtr(instance, payload.c_str());
}

/**
 * Call the readings purge method in the plugin
 */
char *StoragePlugin::readingsPurge(unsigned long age, unsigned int flags, unsigned long sent)
{
	return this->readingsPurgePtr(instance, age, flags, sent);
}

/**
 * Call the readings purge asset method in the plugin.  The plugin
 * returns a count which is wrapped in a malloc'd JSON document so the
 * caller can free() it like the other purge results.
 *
 * @throws PluginNotImplementedException if the plugin does not export
 *	   the purge-by-asset entry point
 * @throws runtime_error on allocation failure
 */
char *StoragePlugin::readingsPurgeAsset(const string& asset)
{
	if (this->readingsPurgeAssetPtr)
	{
		unsigned int purged = this->readingsPurgeAssetPtr(instance, asset.c_str());
		char *json = (char *)malloc(80);
		if (json)
		{
			snprintf(json, 80, "{ \"purged\" : %u }", purged);
			return json;
		}
		else
		{
			throw runtime_error("Out of memory");
		}
	}
	throw PluginNotImplementedException("Purge by asset name not implemented in the storage plugin");
}

/**
 * Release a result from a retrieve
 */
void StoragePlugin::release(const char *results)
{
	this->releasePtr(instance, results);
}

/**
 * Get the last error from the plugin
 */
PLUGIN_ERROR *StoragePlugin::lastError()
{
	return this->lastErrorPtr(instance);
}

/**
 * Call the create table snapshot method in the plugin
 */
int StoragePlugin::createTableSnapshot(const string& table, const string& id)
{
	return this->createTableSnapshotPtr(instance, table.c_str(), id.c_str());
}

/**
 * Call the load table snapshot method in the plugin
 */
int StoragePlugin::loadTableSnapshot(const string& table, const string& id)
{
	return this->loadTableSnapshotPtr(instance, table.c_str(), id.c_str());
}

/**
 * Call the delete table snapshot method in the plugin
 */
int StoragePlugin::deleteTableSnapshot(const string& table, const string& id)
{
	return this->deleteTableSnapshotPtr(instance, table.c_str(), id.c_str());
}

/**
 * Call the get table snapshots method in the plugin
 */
char *StoragePlugin::getTableSnapshots(const string& table)
{
	return this->getTableSnapshotsPtr(instance, table.c_str());
}

/**
 * Call the reading stream method in the plugin
 */
int StoragePlugin::readingStream(ReadingStream **stream, bool commit)
{
	return this->readingStreamPtr(instance, stream, commit);
}

/**
 * Call the shutdown entry point of the plugin.  Returns true when the
 * plugin does not export a shutdown entry point.
 */
bool StoragePlugin::pluginShutdown()
{
	if (this->pluginShutdownPtr)
		return this->pluginShutdownPtr(instance);
	return true;
}

/**
 * Call the schema create method in the plugin.  Returns 0 when the
 * plugin does not export the entry point.
 */
int StoragePlugin::createSchema(const string& payload)
{
	if (this->createSchemaPtr)
		return this->createSchemaPtr(instance, payload.c_str());
	return 0;
}


================================================
FILE: C/services/storage/storage_registry.cpp
================================================
/*
 * Fledge storage service.
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Mark Riddoch
 */
#include <rapidjson/document.h>
#include "rapidjson/stringbuffer.h"
#include <rapidjson/writer.h>
#include "storage_registry.h"
#include "client_http.hpp"
#include "server_http.hpp"
#include "management_api.h"
#include "reading_set.h"
#include "reading.h"
#include "logger.h"
#include "strings.h"
#include "client_http.hpp"
#include <chrono>

#define CHECK_QTIMES	0	// Turn on to check length of time data is queued
#define QTIME_THRESHOLD	3	// Threshold to report long queue times
#define REGISTRY_SLEEP_TIME	5	// Time to sleep in the register process thread
					// between checks for shutdown

using namespace std;
using namespace rapidjson;
using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>;

/**
 * Worker thread entry point
 */
static void worker(StorageRegistry *registry)
{
	registry->run();
}

/**
 * StorageRegistry constructor
 *
 * The storage registry holds registrations for other micro services
 * that wish to receive notifications when new data is available for
 * a given asset. The interested service registers a URL and an asset
 * code, or * for all assets, that URL will then be called when new
 * data arrives for the particular asset.
 *
 * The service registry maintains a worker thread that is responsible
 * for sending these notifications such that the main flow of data into
 * the storage layer is minimally impacted by the registration and
 * delivery of these messages to interested microservices.
 */
StorageRegistry::StorageRegistry() : m_thread(NULL)
{
	m_running = true;
	// Start the background thread that delivers the notifications
	m_thread = new thread(worker, this);
}

/**
 * StorageRegistry destructor
 *
 * Signal the worker thread to stop, join it and drain the queues.
 * NOTE(review): the queued items hold strdup'd buffers which are not
 * freed when popped here - harmless at process shutdown but worth
 * confirming nothing else destroys a registry mid-flight.
 */
StorageRegistry::~StorageRegistry()
{
	m_running = false;
	m_cv.notify_all();
	if (m_thread)
	{
		if (m_thread->joinable())
			m_thread->join();
		delete m_thread;
		m_thread = NULL;
	}
	while (!m_queue.empty())
		m_queue.pop();
	while (!m_tableInsertQueue.empty())
		m_tableInsertQueue.pop();
	while (!m_tableUpdateQueue.empty())
		m_tableUpdateQueue.pop();
	while (!m_tableDeleteQueue.empty())
		m_tableDeleteQueue.pop();
}

/**
 * Process a reading append payload and determine
 * if any microservice has registered an interest
 * in this asset.
 *
 * @param payload	The reading append payload
 */
void StorageRegistry::process(const string& payload)
{
	if (m_registrations.size() != 0)
	{
		/*
		 * We have some registrations so queue a copy of the payload
		 * to be examined in the thread that sends reading notifications
		 * to interested parties.
		 */
		char *data = NULL;
		if ((data = strdup(payload.c_str())) != NULL)
		{
			time_t now = time(0);
			Item item = make_pair(now, data);
			lock_guard<mutex> guard(m_qMutex);
			m_queue.push(item);
			m_cv.notify_all();
		}
	}
}

/**
 * Process a table insert payload and determine
 * if any microservice has registered an interest
 * in this table. Called from StorageApi::commonInsert()
 *
 * @param tableName	The name of the table that was inserted into
 * @param payload	The table insert payload
 */
void StorageRegistry::processTableInsert(const string& tableName, const string& payload)
{
	Logger::getLogger()->debug("StorageRegistry::processTableInsert(): tableName=%s, payload=%s", tableName.c_str(), payload.c_str());
	if (m_tableRegistrations.size() > 0)
	{
		/*
		 * We have some registrations so queue a copy of the payload
		 * to be examined in the thread that sends table notifications
		 * to interested parties.
*/ char *table = strdup(tableName.c_str()); char *data = strdup(payload.c_str()); if (data != NULL && table != NULL) { time_t now = time(0); TableItem item = make_tuple(now, table, data); lock_guard<mutex> guard(m_qMutex); m_tableInsertQueue.push(item); m_cv.notify_all(); } } } /** * Process a table update payload and determine * if any microservice has registered an interest * in this table. Called from StorageApi::commonUpdate() * * @param payload The table update payload */ void StorageRegistry::processTableUpdate(const string& tableName, const string& payload) { Logger::getLogger()->info("Checking for registered interest in table %s with update %s", tableName.c_str(), payload.c_str()); if (m_tableRegistrations.size() > 0) { /* * We have some registrations so queue a copy of the payload * to be examined in the thread the send table notifications * to interested parties. */ char *table = strdup(tableName.c_str()); char *data = strdup(payload.c_str()); if (data != NULL && table != NULL) { time_t now = time(0); TableItem item = make_tuple(now, table, data); lock_guard<mutex> guard(m_qMutex); m_tableUpdateQueue.push(item); m_cv.notify_all(); } } } /** * Process a table delete payload and determine * if any microservice has registered an interest * in this table. Called from StorageApi::commonDelete() * * @param payload The table delete payload */ void StorageRegistry::processTableDelete(const string& tableName, const string& payload) { Logger::getLogger()->info("Checking for registered interest in table %s with delete %s", tableName.c_str(), payload.c_str()); if (m_tableRegistrations.size() > 0) { /* * We have some registrations so queue a copy of the payload * to be examined in the thread the send table notifications * to interested parties. 
*/ char *table = strdup(tableName.c_str()); char *data = strdup(payload.c_str()); if (data != NULL && table != NULL) { time_t now = time(0); TableItem item = make_tuple(now, table, data); lock_guard<mutex> guard(m_qMutex); m_tableDeleteQueue.push(item); m_cv.notify_all(); } } } /** * Handle a registration request from a client of the storage layer * * @param asset The asset of interest * @param url The URL to call */ void StorageRegistry::registerAsset(const string& asset, const string& url) { lock_guard<mutex> guard(m_registrationsMutex); m_registrations.push_back(pair<string *, string *>(new string(asset), new string(url))); } /** * Handle a request to remove a registration of interest * * @param asset The asset of interest * @param url The URL to call */ void StorageRegistry::unregisterAsset(const string& asset, const string& url) { lock_guard<mutex> guard(m_registrationsMutex); for (auto it = m_registrations.begin(); it != m_registrations.end(); ) { if (asset.compare(*(it->first)) == 0 && url.compare(*(it->second)) == 0) { delete it->first; delete it->second; it = m_registrations.erase(it); } else { ++it; } } } /** * Parse a table subscription (un)register JSON payload * * @param payload JSON payload describing the interest */ TableRegistration* StorageRegistry::parseTableSubscriptionPayload(const string& payload) { Document doc; doc.Parse(payload.c_str()); if (doc.HasParseError()) { Logger::getLogger()->error("StorageRegistry::parseTableSubscriptionPayload(): Parse error in subscription request payload"); return NULL; } if (!doc.HasMember("url")) { Logger::getLogger()->error("StorageRegistry::parseTableSubscriptionPayload(): subscription request doesn't have url field"); return NULL; } if (!doc.HasMember("key")) { Logger::getLogger()->error("StorageRegistry::parseTableSubscriptionPayload(): subscription request doesn't have url field"); return NULL; } if (!doc.HasMember("operation")) { 
Logger::getLogger()->error("StorageRegistry::parseTableSubscriptionPayload(): subscription request doesn't have url field"); return NULL; } TableRegistration *reg = new TableRegistration; reg->url = doc["url"].GetString(); reg->key = doc["key"].GetString(); reg->operation = doc["operation"].GetString(); if (reg->key.size()) { if (!doc.HasMember("values") || !doc["values"].IsArray()) { Logger::getLogger()->error("Subscription request" \ " doesn't have a proper values field, payload=%s", payload.c_str()); delete reg; return NULL; } for (auto & v : doc["values"].GetArray()) reg->keyValues.emplace_back(v.GetString()); } return reg; } /** * Handle a registration request for a table from a client of the storage layer * * @param table The table of interest * @param payload JSON payload describing the interest */ void StorageRegistry::registerTable(const string& table, const string& payload) { TableRegistration *reg = parseTableSubscriptionPayload(payload); if (!reg) { Logger::getLogger()->error("Unable to register invalid Registration entry for table %s, payload %s", table.c_str(), payload.c_str()); return; } lock_guard<mutex> guard(m_tableRegistrationsMutex); Logger::getLogger()->info("Adding registration entry for table %s", table.c_str()); m_tableRegistrations.push_back(pair<string *, TableRegistration *>(new string(table), reg)); } /** * Handle a request to remove a registration of interest in a table * * @param table The table of interest * @param payload JSON payload describing the interest */ void StorageRegistry::unregisterTable(const string& table, const string& payload) { TableRegistration *reg = parseTableSubscriptionPayload(payload); if (!reg) { Logger::getLogger()->info("Invalid Registration entry for table %s, payload %s", table.c_str(), payload.c_str()); return; } lock_guard<mutex> guard(m_tableRegistrationsMutex); Logger::getLogger()->info("%d entries registered interest in table operations", m_tableRegistrations.size()); bool found = false; for (auto it = 
m_tableRegistrations.begin(); found == false && it != m_tableRegistrations.end(); ) { TableRegistration *reg_it = it->second; if (table.compare(*(it->first)) == 0 && reg->url.compare(reg_it->url)==0 && reg->key.compare(reg_it->key)==0 && reg->operation.compare(reg_it->operation)==0) { // Either no key is to be matched or a key is to be matched against a possible set of values if (reg->key.size()==0 || (reg->key.size()>0 && reg->keyValues == reg_it->keyValues)) { delete it->first; delete it->second; it = m_tableRegistrations.erase(it); Logger::getLogger()->info("Removed registration for table %s and url %s", table, reg->key.c_str()); found = true; } else { ++it; } } else { ++it; } } if (!found) { Logger::getLogger()->warn( "Failed to remove subscription for table '%s' using key '%s' with operation '%s' and url '%s'", table.c_str(), reg->key.c_str(), reg->operation.c_str(), reg->url.c_str()); } delete reg; } /** * The worker function that processes the queue of payloads * that may need to be sent to subscribers. 
*/ void StorageRegistry::run() { while (m_running) { char *data = NULL; #if CHECK_QTIMES time_t qTime; #endif { unique_lock<mutex> mlock(m_cvMutex); while (m_queue.size() == 0 && m_tableInsertQueue.size() == 0 && m_tableUpdateQueue.size() == 0 && m_tableDeleteQueue.size() == 0) { m_cv.wait_for(mlock, std::chrono::seconds(REGISTRY_SLEEP_TIME)); if (!m_running) { return; } } while (!m_queue.empty()) { Item item = m_queue.front(); m_queue.pop(); data = item.second; #if CHECK_QTIMES qTime = item.first; #endif if (data) { #if CHECK_QTIMES if (time(0) - qTime > QTIME_THRESHOLD) { Logger::getLogger()->error("Readings data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); } #endif processPayload(data); free(data); } } while (!m_tableInsertQueue.empty()) { char *tableName = NULL; TableItem item = m_tableInsertQueue.front(); m_tableInsertQueue.pop(); tableName = get<1>(item); data = get<2>(item); #if CHECK_QTIMES qTime = item.first; #endif if (tableName && data) { #if CHECK_QTIMES if (time(0) - qTime > QTIME_THRESHOLD) { Logger::getLogger()->error("Table insert data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); } #endif processInsert(tableName, data); free(tableName); free(data); } } while (!m_tableUpdateQueue.empty()) { char *tableName = NULL; TableItem item = m_tableUpdateQueue.front(); m_tableUpdateQueue.pop(); tableName = get<1>(item); data = get<2>(item); #if CHECK_QTIMES qTime = item.first; #endif if (tableName && data) { #if CHECK_QTIMES if (time(0) - qTime > QTIME_THRESHOLD) { Logger::getLogger()->error("Table update data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); } #endif processUpdate(tableName, data); free(tableName); free(data); } } while (!m_tableDeleteQueue.empty()) { char *tableName = NULL; TableItem item = m_tableDeleteQueue.front(); m_tableDeleteQueue.pop(); tableName = get<1>(item); data = get<2>(item); #if CHECK_QTIMES qTime = item.first; #endif 
if (tableName && data) { #if CHECK_QTIMES if (time(0) - qTime > QTIME_THRESHOLD) { Logger::getLogger()->error("Table delete data has been queued for %d seconds to be sent to registered party", (time(0) - qTime)); } #endif processDelete(tableName, data); free(tableName); free(data); } } } } } /** * Process an incoming payload and distribute as required to registered * services * * @param payload The payload to potentially distribute */ void StorageRegistry::processPayload(char *payload) { bool allDone = true; lock_guard<mutex> guard(m_registrationsMutex); // First of all deal with those that registered for all assets for (REGISTRY::const_iterator it = m_registrations.cbegin(); it != m_registrations.cend(); it++) { if (it->first->compare("*") == 0) { sendPayload(*(it->second), payload); } else { allDone = false; } } if (allDone) { // No registrations for individual assets, no need to parse payload processAssetRefusals(); return; } for (REGISTRY::const_iterator it = m_registrations.cbegin(); it != m_registrations.cend(); it++) { if (it->first->compare("*") != 0) { try { filterPayload(*(it->second), payload, *(it->first)); } catch (const exception& e) { Logger::getLogger()->error("filterPayload: exception %s", e.what()); } } } // Remove any registrations that are no longer listening processAssetRefusals(); } /** * Send the copy of the payload to the given URL * * @param url The URL to send the payload to * @param payload The payload to send */ void StorageRegistry::sendPayload(const string& url, const char *payload) { size_t found = url.find_first_of("://"); size_t found1 = url.find_first_of("/", found + 3); string hostport = url.substr(found+3, found1 - found - 3); string resource = url.substr(found1); HttpClient client(hostport); try { client.request("POST", resource, payload); } catch (const exception& e) { string why = e.what(); if (why.compare("Connection refused") == 0) { // The registered service is no longer listening // Log this for potential removal if the 
issue persists auto it = m_refusals.find(url); if (it != m_refusals.end()) m_refusals[url]++; else m_refusals[url] = 1; } else { Logger::getLogger()->error("sendPayload: exception %s sending reading data to interested party %s", why.c_str(), url.c_str()); } } } /** * Send a filtered copy of the payload to the given URL * * @param url The URL to send the payload to * @param payload The payload to send * @param asset The asset code to filter */ void StorageRegistry::filterPayload(const string& url, char *payload, const string& asset) { ostringstream convert; size_t found = url.find_first_of("://"); size_t found1 = url.find_first_of("/", found + 3); string hostport = url.substr(found+3, found1 - found - 3); string resource = url.substr(found1); // Filter the payload to include just the one asset Document doc; doc.Parse(payload); if (doc.HasParseError()) { Logger::getLogger()->error("filterPayload: Parse error in payload"); return; } if (!doc.HasMember("readings")) { Logger::getLogger()->error("filterPayload: payload has no readings object"); return; } const Value& readings = doc["readings"]; if (!readings.IsArray()) { Logger::getLogger()->error("filterPayload: payload readings object is not an array"); return; } convert << "{ \"readings\" : [ "; int count = 0; /* * Loop over the readings and create a reading object for * each, check if it matches the asset name and incldue in the * new payload if it does. In eother case free that object * immediately to reduce the memory requirement. 
*/ for (auto& reading : readings.GetArray()) { if (reading.IsObject()) { JSONReading *value = new JSONReading(reading); if (value->getAssetName().compare(asset) == 0) { if (count) convert << ","; count++; convert << value->toJSON(); } delete value; } } convert << "] }"; /* * Check if any assets inthe filtered payload */ if (count == 0) { // Nothing to send return; } HttpClient client(hostport); try { client.request("POST", resource, convert.str()); } catch (const exception& e) { string why = e.what(); if (why.compare("Connection refused") == 0) { // The registered sewrvice is no longer listening // Log this for potential removal if the issue persists auto it = m_refusals.find(url); if (it != m_refusals.end()) m_refusals[url]++; else m_refusals[url] = 1; } else { Logger::getLogger()->error("filterPayload: exception %s sending reading data to interested party %s", why.c_str(), url.c_str()); } } } /** * Process an incoming payload and distribute as required to registered * services * * @param payload The payload to potentially distribute */ void StorageRegistry::processInsert(char *tableName, char *payload) { Logger::getLogger()->debug("StorageRegistry::processInsert(): Handling for table:%s, payload=%s", tableName, payload); Logger::getLogger()->debug("StorageRegistry::processInsert(): m_tableRegistrations.size()=%d", m_tableRegistrations.size()); Document payloadDoc; payloadDoc.Parse(payload); if (payloadDoc.HasParseError()) { Logger::getLogger()->error("Internal error unable to parse payload for insert into table %s, payload is %s", tableName, payload); return; } lock_guard<mutex> guard(m_tableRegistrationsMutex); for (auto & reg : m_tableRegistrations) { if (reg.first->compare(tableName) != 0) continue; TableRegistration *tblreg = reg.second; // If key is empty string, no need to match key/value pair in payload // Also operation must be "insert" for initial implementation if (tblreg->operation.compare("insert") != 0) { continue; } if (tblreg->key.size() == 0) { 
sendPayload(tblreg->url, payload); } else { if (payloadDoc.HasMember("inserts") && payloadDoc["inserts"].IsArray()) { // We have multiple inserts in the payload, parse each one and send // only the insert for which the key has been registered Value &inserts = payloadDoc["inserts"]; for (Value::ConstValueIterator iter = inserts.Begin(); iter != inserts.End(); ++iter) { if (iter->HasMember(tblreg->key.c_str())) { string payloadKeyValue = (*iter)[tblreg->key.c_str()].GetString(); if (std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), payloadKeyValue) != tblreg->keyValues.end()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); iter->Accept(writer); const char *output = buffer.GetString(); sendPayload(tblreg->url, output); } } } } else { if (payloadDoc.HasMember(tblreg->key.c_str()) && payloadDoc[tblreg->key.c_str()].IsString()) { string payloadKeyValue = payloadDoc[tblreg->key.c_str()].GetString(); if (std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), payloadKeyValue) != tblreg->keyValues.end()) { sendPayload(tblreg->url, payload); } } } } } processTableRefusals(); } /** * Process an incoming payload and distribute as required to registered * services * * @param payload The payload to potentially distribute */ void StorageRegistry::processUpdate(char *tableName, char *payload) { Document doc; doc.Parse(payload); if (doc.HasParseError()) { Logger::getLogger()->error("Unable to parse table update payload for table %s, request is %s", tableName, payload); return; } lock_guard<mutex> guard(m_tableRegistrationsMutex); for (auto & reg : m_tableRegistrations) { if (reg.first->compare(tableName) != 0) continue; TableRegistration *tblreg = reg.second; // If key is empty string, no need to match key/value pair in payload if (tblreg->operation.compare("update") != 0) { continue; } if (tblreg->key.empty()) { // No key to match, send all updates to table sendPayload(tblreg->url, payload); } else { if (doc.HasMember("updates") && 
doc["updates"].IsArray()) { // Multiple updates in a single call Value &updates = doc["updates"]; for (Value::ConstValueIterator iter = updates.Begin(); iter != updates.End(); ++iter) { const Value& where = (*iter)["where"]; if (where.HasMember("column") && where["column"].IsString() && where.HasMember("value") && where["value"].IsString()) { string updateKey = where["column"].GetString(); string keyValue = where["value"].GetString(); if (updateKey.compare(tblreg->key) == 0 && std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), keyValue) != tblreg->keyValues.end()) { if (iter->HasMember("values")) { const Value& values = (*iter)["values"]; StringBuffer buffer; Writer<StringBuffer> writer(buffer); values.Accept(writer); const char *output = buffer.GetString(); sendPayload(tblreg->url, output); } else if (iter->HasMember("expressions")) { const Value& expressions = (*iter)["expressions"]; for (Value::ConstValueIterator expr = expressions.Begin(); expr != expressions.End(); ++expr) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); expr->Accept(writer); const char *output = buffer.GetString(); sendPayload(tblreg->url, output); } } } } } } else if (doc.HasMember("where") && doc["where"].IsObject()) { const Value& where = doc["where"]; if (where.HasMember("column") && where["column"].IsString() && where.HasMember("value") && where["value"].IsString()) { string updateKey = where["column"].GetString(); string keyValue = where["value"].GetString(); if (updateKey.compare(tblreg->key) == 0 && std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), keyValue) != tblreg->keyValues.end()) { if (doc.HasMember("values")) { const Value& values = doc["values"]; StringBuffer buffer; Writer<StringBuffer> writer(buffer); values.Accept(writer); const char *output = buffer.GetString(); sendPayload(tblreg->url, output); } else if (doc.HasMember("expressions")) { const Value& expressions = doc["expressions"]; for (Value::ConstValueIterator expr = 
expressions.Begin(); expr != expressions.End(); ++expr) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); expr->Accept(writer); const char *output = buffer.GetString(); sendPayload(tblreg->url, output); } } } } } } } } /** * Process an incoming payload and distribute as required to registered * services * * @param payload The payload to potentially distribute */ void StorageRegistry::processDelete(char *tableName, char *payload) { Document doc; bool allRows = false; if (! *payload) // Empty { allRows = true; } else { doc.Parse(payload); if (doc.HasParseError()) { Logger::getLogger()->error("Unable to parse table delete payload for table %s, request is %s", tableName, payload); return; } } lock_guard<mutex> guard(m_tableRegistrationsMutex); for (auto & reg : m_tableRegistrations) { if (reg.first->compare(tableName) != 0) continue; TableRegistration *tblreg = reg.second; // If key is empty string, no need to match key/value pair in payload if (tblreg->operation.compare("delete") != 0) { continue; } if (allRows) { sendPayload(tblreg->url, payload); } else if (tblreg->key.empty()) { // No key to match, send all updates to table sendPayload(tblreg->url, payload); } else { if (doc.HasMember("where") && doc["where"].IsObject()) { const Value& where = doc["where"]; if (where.HasMember("column") && where["column"].IsString() && where.HasMember("value") && where["value"].IsString()) { string updateKey = where["column"].GetString(); string keyValue = where["value"].GetString(); if (updateKey.compare(tblreg->key) == 0 && std::find(tblreg->keyValues.begin(), tblreg->keyValues.end(), keyValue) != tblreg->keyValues.end()) { StringBuffer buffer; Writer<StringBuffer> writer(buffer); where.Accept(writer); const char *output = buffer.GetString(); sendPayload(tblreg->url, output); } } } } } } /** * Unregister a registration of interest in assets after a number of refusals. 
* Called holding the m_registrationsMutex * * A certain number of refused connections is tolerated to allow for * transient faults. */ void StorageRegistry::processAssetRefusals() { REGISTRY newRegistry; if (m_refusals.empty()) { return; } for (auto& item : m_registrations) { string url = *item.second; auto refusal = m_refusals.find(url); if (refusal != m_refusals.end()) { int cnt = m_refusals[url]; if (cnt < MAX_REFUSALS) { newRegistry.push_back(item); } else { Logger::getLogger()->info("Removing registration for %s with URL %s. Service has probably failed", item.first->c_str(), url); m_refusals.erase(refusal); delete item.first; delete item.second; } } else { newRegistry.push_back(item); } } m_registrations.clear(); m_registrations = newRegistry; } /** * Unregister a registration for interest in tables after a number of refusals. * Called holding the m_tableRegistrationsMutex * * A certain number of refused connections is tolerated to allow for * transient faults. */ void StorageRegistry::processTableRefusals() { REGISTRY_TABLE newRegistry; if (m_refusals.empty()) { return; } for (auto& item : m_tableRegistrations) { string url = item.second->url; auto refusal = m_refusals.find(url); if (refusal != m_refusals.end()) { int cnt = m_refusals[url]; if (cnt < MAX_REFUSALS) { newRegistry.push_back(item); } else { Logger::getLogger()->info("Removing registration for table %s with URL %s. 
Service has probably failed", item.first->c_str(), url); m_refusals.erase(refusal); delete item.first; delete item.second; } } else { newRegistry.push_back(item); } } m_tableRegistrations.clear(); m_tableRegistrations = newRegistry; } /** * Test function to add some dummy/test table subscriptions */ void StorageRegistry::insertTestTableReg() { string table1("log"); string payload1 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl", "key": "code", "values":["CONAD", "PURGE", "CONCH", "FSTOP", "SRVRG"], "operation": "insert"} )***"; string table2("asset_tracker"); string payload2 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl2", "key": "", "operation": "insert"} )***"; string table3("asset_tracker"); string payload3 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl3", "key": "event", "values":["Ingest", "Filter"], "operation": "insert"} )***"; Logger::getLogger()->error("StorageRegistry::insertTestTableReg(): table=%s, payload=%s", table1.c_str(), payload1.c_str()); registerTable(table1, payload1); Logger::getLogger()->error("StorageRegistry::insertTestTableReg(): table=%s, payload=%s", table2.c_str(), payload2.c_str()); registerTable(table2, payload2); Logger::getLogger()->error("StorageRegistry::insertTestTableReg(): table=%s, payload=%s", table3.c_str(), payload3.c_str()); registerTable(table3, payload3); } /** * Test function to remove a dummy/test table subscription * * @param n The subscription number to remove */ void StorageRegistry::removeTestTableReg(int n) { string table1("log"); string payload1 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl", "key": "code", "values":["CONAD", "PURGE", "CONCH", "FSTOP", "SRVRG"], "operation": "insert"} )***"; string table2("asset_tracker"); string payload2 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl2", "key": "", "operation": "insert"} )***"; string table3("asset_tracker"); string payload3 = R"***( {"url": "http://localhost:8081/dummyTableNotifyUrl3", "key": 
"event", "values":["Ingest", "Filter"], "operation": "insert"} )***"; switch(n) { case 1: unregisterTable(table1, payload1); Logger::getLogger()->error("StorageRegistry::removeTestTableReg(): table=%s, payload=%s", table1.c_str(), payload1.c_str()); break; case 2: unregisterTable(table2, payload2); Logger::getLogger()->error("StorageRegistry::removeTestTableReg(): table=%s, payload=%s", table2.c_str(), payload2.c_str()); break; case 3: unregisterTable(table3, payload3); Logger::getLogger()->error("StorageRegistry::removeTestTableReg(): table=%s, payload=%s", table3.c_str(), payload3.c_str()); break; default: Logger::getLogger()->error("StorageRegistry::removeTestTableReg(): unhandled value n=%d", n); break; } } ================================================ FILE: C/services/storage/storage_stats.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2017 OSisoft, LLC * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <storage_stats.h> #include <string> #include <sstream> using namespace std; /** * Construct the statistics class for the storage service. 
*/ StorageStats::StorageStats() : commonInsert(0), commonSimpleQuery(0), commonQuery(0), commonUpdate(0), commonDelete(0), readingAppend(0), readingFetch(0), readingQuery(0), readingPurge(0) { } /** * Serialise the statistics as JSON */ void StorageStats::asJSON(string& json) const { ostringstream convert; // stream used for the conversion convert << "{ \"commonInsert\" : " << commonInsert << ","; convert << " \"commonSimpleQuery\" : " << commonSimpleQuery << ","; convert << " \"commonQuery\" : " << commonQuery << ","; convert << " \"commonUpdate\" : " << commonUpdate << ","; convert << " \"commonDelete\" : " << commonDelete << ","; convert << " \"readingAppend\" : " << readingAppend << ","; convert << " \"readingFetch\" : " << readingFetch << ","; convert << " \"readingQuery\" : " << readingQuery << ","; convert << " \"readingPurge\" : " << readingPurge << " }"; json = convert.str(); } ================================================ FILE: C/services/storage/stream_handler.cpp ================================================ /* * Fledge storage service. * * Copyright (c) 2019 Dianomic Systems Inc * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <stream_handler.h> #include <storage_api.h> #include <storage_api.h> #include <reading_stream.h> #include <netinet/in.h> #include <arpa/inet.h> #include <fcntl.h> #include <sys/epoll.h> #include <sys/ioctl.h> #include <chrono> #include <unistd.h> #include <errno.h> using namespace std; /** * C wrapper for the handler thread we use to handle the polling of * the stream ingestion protocol. * * @param handler The SgtreamHanler instance that started this thread */ static void threadWrapper(void *handler) { ((StreamHandler *)handler)->handler(); } /** * Constructor for the StreamHandler class */ StreamHandler::StreamHandler(StorageApi *api) : m_api(api), m_running(true) { m_pollfd = epoll_create(1); m_handlerThread = thread(threadWrapper, this); } /** * Destructor for the StreamHandler. 
Close down the epoll * system and wait for the handler thread to terminate. */ StreamHandler::~StreamHandler() { m_running = false; close(m_pollfd); m_handlerThread.join(); } /** * The handler method for the stream handler. This is run in its own thread * and is responsible for using epoll to gather events on the descriptors and * to dispatch them to the individual streams */ void StreamHandler::handler() { struct epoll_event events[MAX_EVENTS]; while (m_running) { std::unique_lock<std::mutex> lock(m_streamsMutex); if (m_streams.size() == 0) { Logger::getLogger()->debug("Waiting for first stream to be created"); m_streamsCV.wait_for(lock, chrono::milliseconds(500)); } else { /* * Call epoll_wait with a zero timeout to see if any data is available. * If not then call with a tiemout. This prevents Linux from scheduling * us out if there is data on the socket. */ int nfds = epoll_wait(m_pollfd, events, MAX_EVENTS, 100); if (nfds == 0) { nfds = epoll_wait(m_pollfd, events, MAX_EVENTS, 100); } if (nfds == -1) { Logger::getLogger()->error("Stream epoll error: %s", strerror(errno)); } else { for (int i = 0; i < nfds; i++) { Stream *stream = (Stream *)events[i].data.ptr; stream->handleEvent(m_pollfd, m_api, events[i].events); } } } } } /** * Create a new stream and add it to the epoll mechanism for the stream handler * * @param token The single use connection token the client should send * @param The port on which this stream is listening */ uint32_t StreamHandler::createStream(uint32_t *token) { Stream *stream = new Stream(); uint32_t port = stream->create(m_pollfd, token); { std::unique_lock<std::mutex> lock(m_streamsMutex); m_streams.push_back(stream); } m_streamsCV.notify_all(); return port; } /** * Create a stream object to deal with the stream protocol */ StreamHandler::Stream::Stream() : m_status(Closed) { } /** * Destroy a stream */ StreamHandler::Stream::~Stream() { delete m_blockPool; } /** * Create a new stream object. Add that stream to the epoll structure. 
* A listener socket is created and the port sent back to the caller. The client * will connect to this port and then send the token to verify they are the * service that requested the stream to be connected. * * The client calls a REST API endpoint in the storage layer to request a streaming * connection which results in this method beign called. * * @param epollfd The epoll descriptor * @param token The single use token the client will send in the connect request */ uint32_t StreamHandler::Stream::create(int epollfd, uint32_t *token) { struct sockaddr_in address; // Create the memory pool from whuch readings will be allocated if ((m_blockPool = new MemoryPool(BLOCK_POOL_SIZES)) == NULL) { Logger::getLogger()->error("Failed to create memory block pool"); return 0; } // Open the socket used to listen for the incoming stream connection if ((m_socket = socket(AF_INET, SOCK_STREAM, 0)) < 0) { Logger::getLogger()->error("Failed to create socket: %s", strerror(errno)); return 0; } address.sin_family = AF_INET; address.sin_addr.s_addr = INADDR_ANY; address.sin_port = 0; if (bind(m_socket, (struct sockaddr *)&address, sizeof(address)) < 0) { Logger::getLogger()->error("Failed to bind socket: %s", strerror(errno)); return 0; } socklen_t len = sizeof(address); if (getsockname(m_socket, (struct sockaddr *)&address, &len) == -1) Logger::getLogger()->error("Failed to get socket name, %s", strerror(errno)); m_port = ntohs(address.sin_port); Logger::getLogger()->info("Stream port bound to %d", m_port); setNonBlocking(m_socket); if (listen(m_socket, 3) < 0) { Logger::getLogger()->error("Failed to listen: %s", strerror(errno)); return 0; } m_status = Listen; // Create the random token that is used to verify the connection comes from the // source that requested the streaming connection srand(m_port + (unsigned int)time(0)); m_token = (uint32_t)random() & 0xffffffff; *token = m_token; // Add to epoll set m_event.data.ptr = this; m_event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP | 
EPOLLPRI | EPOLLERR; if (epoll_ctl(epollfd, EPOLL_CTL_ADD, m_socket, &m_event) < 0) { Logger::getLogger()->error("Failed to add listening port %d to epoll fileset, %s", m_port, strerror(errno)); } return m_port; } /** * Set the file descriptor to be non blocking * * @param fd The file descripter to set non-blocking */ void StreamHandler::Stream::setNonBlocking(int fd) { int flags; flags = fcntl(fd, F_GETFL, 0); flags |= O_NONBLOCK; fcntl(fd, F_SETFL, flags); } /** * Handle an epoll event. The precise handling will depend * on the state of the stream. * * One of the things done here is to handle the streaming protocol, * reading the block header the individual reading headers and the * readings themselves. * * TODO Improve memory handling, use seperate threads for inserts, send acknowledgements * * @param epollfd The epoll file descriptor */ void StreamHandler::Stream::handleEvent(int epollfd, StorageApi *api, uint32_t events) { ssize_t n; if (events & EPOLLRDHUP) { // TODO mark this stream for destruction epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); close(m_socket); Logger::getLogger()->error("Closing stream..."); m_status = Closed; } if (events & EPOLLHUP) { // TODO mark this stream for destruction epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); close(m_socket); Logger::getLogger()->error("Hangup on socket Closing stream..."); m_status = Closed; } if (events & EPOLLPRI) { // TODO mark this stream for destruction epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); close(m_socket); Logger::getLogger()->error("Eceptional condition on socket Closing stream..."); m_status = Closed; } if (events & EPOLLERR) { // TODO mark this stream for destruction epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event); close(m_socket); m_status = Closed; Logger::getLogger()->error("Error condition on socket Closing stream..."); } if (events & EPOLLIN) { if (m_status == Listen) { // Accept the connection for the streaming data int conn_sock; struct sockaddr addr; 
			socklen_t addrlen = sizeof(addr);
			if ((conn_sock = accept(m_socket, (struct sockaddr *)&addr, &addrlen)) == -1)
			{
				Logger::getLogger()->info("Accept failed for streaming socket: %s", strerror(errno));
				return;
			}
			// Remove and close the listening socket now we have a connection
			epoll_ctl(epollfd, EPOLL_CTL_DEL, m_socket, &m_event);
			close(m_socket);
			Logger::getLogger()->info("Stream connection established");
			// From here on the stream talks over the accepted data socket
			m_socket = conn_sock;
			m_status = AwaitingToken;
			m_event.events = EPOLLIN | EPOLLRDHUP | EPOLLHUP | EPOLLERR | EPOLLPRI | EPOLLET;
			m_event.data.ptr = this;
			if (epoll_ctl(epollfd, EPOLL_CTL_ADD, m_socket, &m_event) == -1)
			{
				Logger::getLogger()->fatal("Failed to add data socket to epoll set: %s", strerror(errno));
			}
		}
		else if (m_status == AwaitingToken)
		{
			// Wait for the client to send back the single use token issued by create()
			RDSConnectHeader hdr;
			if (available(m_socket) < sizeof(hdr))
			{
				// Not enough data yet; wait for the next epoll event
				return;
			}
			if ((n = read(m_socket, &hdr, sizeof(hdr))) != (int)sizeof(hdr))
			{
				Logger::getLogger()->warn("Token exchange failed: Short read of %d bytes: %s", n, strerror(errno));
				return;
			}
			if (hdr.magic == RDS_CONNECTION_MAGIC && hdr.token == m_token)
			{
				// Token verified; reset the counters and start the block protocol
				m_status = Connected;
				m_blockNo = 0;
				m_readingNo = 0;
				m_protocolState = BlkHdr;
				Logger::getLogger()->info("Token for streaming socket exchanged");
			}
			else
			{
				Logger::getLogger()->warn("Incorrect token for streaming socket");
				close(m_socket);
			}
		}
		else if (m_status == Connected)
		{
			/*
			 * We are connected so loop on the available data reading block headers,
			 * reading headers and the readings themselves.
			 *
			 * We use the available method to see if there is enough data before we
			 * read in order to avoid blocking in a read call. This also allows us to
			 * not have to set the socket to non-blocking mode, meaning that our
			 * epoll interaction does not need to be edge triggered.
			 *
			 * Once we exhaust the data that is available we return and allow the
			 * epoll to inform us when more data becomes available.
			 */
			while (1)
			{
				Logger::getLogger()->debug("Connected in protocol state %d, readingNo %d",
						m_protocolState, m_readingNo);
				if (m_protocolState == BlkHdr)
				{
					// Expecting the header that introduces a new block of readings
					RDSBlockHeader blkHdr;
					if (available(m_socket) < sizeof(blkHdr))
					{
						Logger::getLogger()->debug("Not enough bytes for block header");
						return;
					}
					if ((n = read(m_socket, &blkHdr, sizeof(blkHdr))) != (int)sizeof(blkHdr))
					{
						// This should never happen as available said we had enough data
						Logger::getLogger()->warn("Block Header: Short read of %d bytes: %s", n, strerror(errno));
						return;
					}
					if (blkHdr.magic != RDS_BLOCK_MAGIC)
					{
						// Stream is out of sync; dump some bytes for diagnosis and give up
						Logger::getLogger()->error("Expected block header %d, but incorrect header found 0x%x", m_blockNo, blkHdr.magic);
						Logger::getLogger()->error("Previous block size was %d", m_blockSize);
						dump(10);
						close(m_socket);
						return;
					}
					if (blkHdr.blockNumber != m_blockNo)
					{
						// Somehow we lost a block
					}
					m_blockNo++;
					m_blockSize = blkHdr.count;
					m_protocolState = RdHdr;
					m_readingNo = 0;
					Logger::getLogger()->info("New block %d of %d readings", blkHdr.blockNumber, blkHdr.count);
				}
				else if (m_protocolState == RdHdr)
				{
					// We are expecting a reading header
					RDSReadingHeader rdhdr;
					if (available(m_socket) < sizeof(rdhdr))
					{
						Logger::getLogger()->warn("Not enough bytes %d for reading header %d in block %d (socket %d)",
								available(m_socket), m_readingNo, m_blockNo - 1, m_socket);
						// One-off diagnostic: peek at the pending bytes without consuming them
						static bool reported = false;
						if (!reported)
						{
							char buf[40];
							int i;
							i = recv(m_socket, buf, sizeof(buf), MSG_PEEK);
							for (int j = 0; j < i; j++)
								Logger::getLogger()->warn("Byte at %d is %x", j, buf[j]);
							reported = true;
						}
						return;
					}
					int n;
					if ((n = read(m_socket, &rdhdr, sizeof(rdhdr))) < (int)sizeof(rdhdr))
					{
						// Should never happen
						Logger::getLogger()->warn("Not enough bytes read %d for reading header", n);
						return;
					}
					if (rdhdr.magic != RDS_READING_MAGIC)
					{
						Logger::getLogger()->error("Expected reading header %d of %d in block %d, but incorrect header found 0x%x",
								m_readingNo, m_blockSize, m_blockNo, rdhdr.magic);
						dump(10);
						close(m_socket);
						return;
					}
					Logger::getLogger()->debug("Reading Header: assetCodeLngth %d, payloadLength %d",
							rdhdr.assetLength, rdhdr.payloadLength);
					m_readingSize = sizeof(struct timeval) + rdhdr.assetLength + rdhdr.payloadLength;
					uint32_t extra = 0;
					if (rdhdr.assetLength)
					{
						// The asset code is sent explicitly with this reading
						m_sameAsset = false;
						extra = 0;
					}
					else
					{
						// A zero length asset code means "same asset as the previous
						// reading"; reserve room for the cached asset code instead
						m_sameAsset = true;
						extra = m_lastAsset.length() + 1;
						rdhdr.assetLength = extra;
					}
					extra += 2 * sizeof(uint32_t);
					m_currentReading = (ReadingStream *)m_blockPool->allocate(m_readingSize + extra);
					m_readings[m_readingNo % RDS_BLOCK] = m_currentReading;
					m_currentReading->assetCodeLength = rdhdr.assetLength;
					m_currentReading->payloadLength = rdhdr.payloadLength;
					m_protocolState = RdBody;
				}
				else if (m_protocolState == RdBody)
				{
					// We are expecting a reading body
					if (available(m_socket) < m_readingSize)
					{
						Logger::getLogger()->warn("Not enough bytes %d for reading %d in block %d",
								m_readingSize, m_readingNo, m_blockNo - 1);
						return;
					}
					struct iovec iov[3];
					iov[0].iov_base = &m_currentReading->userTs;
					iov[0].iov_len = sizeof(struct timeval);
					if (!m_sameAsset)
					{
						// Scatter read: timestamp, asset code and payload in one readv call
						iov[1].iov_base = &m_currentReading->assetCode;
						iov[1].iov_len = m_currentReading->assetCodeLength;
						iov[2].iov_base = &m_currentReading->assetCode[m_currentReading->assetCodeLength];
						iov[2].iov_len = m_currentReading->payloadLength;
						long n = readv(m_socket, iov, 3);
						if ((unsigned long)n != m_currentReading->assetCodeLength + m_currentReading->payloadLength + sizeof(struct timeval))
						{
							Logger::getLogger()->error("Short read for reading");
						}
						// Cache the asset code for reuse by following readings
						m_lastAsset = m_currentReading->assetCode;
					}
					else
					{
						// No asset code on the wire: read timestamp and payload only,
						// then copy the cached asset code into the reading
						iov[1].iov_base = &m_currentReading->assetCode[m_currentReading->assetCodeLength];
						iov[1].iov_len = m_currentReading->payloadLength;
						long n = readv(m_socket, iov, 2);
						if ((unsigned long)n != m_currentReading->payloadLength + sizeof(struct timeval))
						{
							Logger::getLogger()->error("Short read for reading");
						}
						memcpy(&m_currentReading->assetCode[0], m_lastAsset.c_str(), m_currentReading->assetCodeLength);
					}
					m_readingNo++;
					m_protocolState = RdHdr;
					if
					((m_readingNo % RDS_BLOCK) == 0)
					{
						// A full sub-block of RDS_BLOCK readings is buffered;
						// insert without committing and recycle the pool blocks
						queueInsert(api, RDS_BLOCK, false);
						for (int i = 0; i < RDS_BLOCK; i++)
							m_blockPool->release(m_readings[i]);
					}
					else if (m_readingNo == m_blockSize)
					{
						// We have completed the block, insert readings and wait
						// for a block header
						queueInsert(api, m_readingNo % RDS_BLOCK, true);
						for (uint32_t i = 0; i < m_readingNo % RDS_BLOCK; i++)
							m_blockPool->release(m_readings[i]);
						m_protocolState = BlkHdr;
						Logger::getLogger()->warn("Waiting for the next block header");
					}
					else if (m_readingNo > m_blockSize)
					{
						Logger::getLogger()->error("Too many readings in block");
					}
				}
			}
		}
	}
}

/**
 * Queue a block of readings to be inserted into the database. The readings
 * are available via the m_readings array.
 *
 * @param nReadings	The number of readings to insert
 * @param commit	Perform commit at end of this block
 */
void StreamHandler::Stream::queueInsert(StorageApi *api, unsigned int nReadings, bool commit)
{
	// The readings array is NULL terminated before being handed to the storage API
	m_readings[nReadings] = NULL;
	api->readingStream(m_readings, commit);
}

/**
 * Return the number of bytes available to read on the
 * given file descriptor
 *
 * @param fd	The file descriptor to check
 * @return Number of bytes pending, or 0 if the ioctl failed
 */
unsigned int StreamHandler::Stream::available(int fd)
{
unsigned int	avail;

	if (ioctl(fd, FIONREAD, &avail) < 0)
	{
		Logger::getLogger()->warn("FIONREAD failed: %s", strerror(errno));
		return 0;
	}
	return avail;
}

/**
 * Block memory pool destructor. Return any memory from the memory pools
 * to the system.
 */
StreamHandler::Stream::MemoryPool::~MemoryPool()
{
	for (auto it = m_pool.begin(); it != m_pool.end(); it++)
	{
		while (!it->second->empty())
		{
			void *mem = it->second->back();
			it->second->pop_back();
			// Step back over the hidden size_t header written by
			// createPool/growPool to free the real allocation start
			free(&((size_t *)mem)[-1]);
		}
		delete it->second;
	}
}

/**
 * Allocate a buffer from the block pool
 *
 * @param size	Minimum size of block to allocate
 */
void *StreamHandler::Stream::MemoryPool::allocate(size_t size)
{
	// Round the request up to the nearest pool block size
	size = rndSize(size);
	auto blkpool = m_pool.find(size);
	if (blkpool == m_pool.end())
	{
		Logger::getLogger()->info("No block pool for %d bytes, creating", size);
		// Create a new memory pool
		createPool(size);
		blkpool = m_pool.find(size);
	}
	if (blkpool->second->empty())
	{
		Logger::getLogger()->warn("Extending block pool for %d bytes", size);
		growPool(blkpool->second, size);
	}
	void *memory = blkpool->second->back();
	blkpool->second->pop_back();
	return memory;
}

/**
 * Release memory back to the memory pool
 *
 * @param memory	The memory to release
 */
void StreamHandler::Stream::MemoryPool::release(void *memory)
{
	// The pool block size is stored in the hidden size_t header
	// immediately before the address handed out by allocate()
	size_t poolSize = ((size_t *)memory)[-1];
	auto blkpool = m_pool.find(poolSize);
	if (blkpool == m_pool.end())
	{
		Logger::getLogger()->fatal("Returning memory to a block pool (%d) that does not exist", poolSize);
		throw runtime_error("Invalid block pool");
	}
	blkpool->second->push_back(memory);
}

/**
 * Allocate a new memory block pool
 *
 * @param size	Size of the memory blocks in the pool
 */
void StreamHandler::Stream::MemoryPool::createPool(size_t size)
{
	// Each block is prefixed by a size_t recording the pool it belongs to
	size_t realSize = size + sizeof(size_t);
	vector<void *> *blocks = new vector<void *>;
	for (int i = 0; i < RDS_BLOCK; i++)
	{
		size_t *mem = (size_t *)malloc(realSize);
		// Hand out the address after the header; stash the size in the header
		blocks->push_back(&mem[1]);
		mem[0] = size;
	}
	m_pool.insert(pair<int, vector<void *>* >(size, blocks));
}

/**
 * Grow the memory pool for this size block
 *
 * @param pool	The memory pool
 * @param size	The size of the blocks in the memory pool
 */
void StreamHandler::Stream::MemoryPool::growPool(vector<void *> *pool, size_t size)
{
	// Same layout as createPool: a size_t header precedes each block
	size_t realSize = size + sizeof(size_t);
	for (int i = 0; i < RDS_BLOCK; i++)
	{
		size_t *mem = (size_t *)malloc(realSize);
pool->push_back(&mem[1]); mem[0] = size; } } /** * Diagnostic routine to display stream content. * * @param n Number of lines to display */ void StreamHandler::Stream::dump(int n) { char buf[132]; char data[10]; while (n--) { buf[0] = 0; int r = read(m_socket, data, 10); for (int i = 0; i < r; i++) { char one[8]; snprintf(one, sizeof(one), "0x%02x ", data[i]); strcat(buf, one); } Logger::getLogger()->error(buf); } } ================================================ FILE: C/tasks/check_updates/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (check_updates) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(COMMON_LIB common-lib) set(PLUGINS_COMMON_LIB plugins-common-lib) find_package(Threads REQUIRED) set(BOOST_COMPONENTS system thread) find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) include_directories(.) include_directories(include) include_directories(../../thirdparty/Simple-Web-Server) include_directories(../../thirdparty/rapidjson/include) include_directories(../../common/include) file(GLOB check_updates_src "*.cpp") link_directories(${PROJECT_BINARY_DIR}/../../lib) add_executable(${PROJECT_NAME} ${check_updates_src} ${common_src}) target_link_libraries(${PROJECT_NAME} ${Boost_LIBRARIES}) target_link_libraries(${PROJECT_NAME} ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${PLUGINS_COMMON_LIB}) install(TARGETS check_updates RUNTIME DESTINATION fledge/tasks) if(MSYS) #TODO: Is MSYS true when MSVC is true? 
target_link_libraries(check_updates ws2_32 wsock32) if(OPENSSL_FOUND) target_link_libraries(check_updates ws2_32 wsock32) endif() endif() ================================================ FILE: C/tasks/check_updates/check_updates.cpp ================================================ /* * Fledge Check Updates * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Devki Nandan Ghildiyal */ #include <check_updates.h> #include <logger.h> #include <cstdlib> #include <thread> #include <csignal> #include <fstream> #include <errno.h> #include <cstring> #include <sstream> using namespace std; volatile std::sig_atomic_t signalReceived = 0; static void signalHandler(int signal) { signalReceived = signal; } /** * Constructor for CheckUpdates */ CheckUpdates::CheckUpdates(int argc, char** argv) : FledgeProcess(argc, argv) { std::string paramName; paramName = getName(); m_logger = Logger::getLogger(); // Logger is created by FledgeProcess m_logger->info("CheckUpdates starting - parameters name :%s:", paramName.c_str() ); m_mgtClient = this->getManagementClient(); } /** * Destructor for CheckUpdates */ CheckUpdates::~CheckUpdates() { } /** * CheckUpdates run method, called by the base class to start the process and do the actual work. 
*/ void CheckUpdates::run() { // We handle these signals, add more if needed std::signal(SIGINT, signalHandler); std::signal(SIGSTOP, signalHandler); std::signal(SIGTERM, signalHandler); if (!m_dryRun) { raiseAlerts(); } processEnd(); } /** * Execute the raiseAlerts, create an alert for all the packages for which update is available */ void CheckUpdates::raiseAlerts() { m_logger->debug("raiseAlerts running"); try { int availableUpdates = getUpgradablePackageList().size(); if (availableUpdates > 0) { std::string key = "package_updates"; std::string message = ""; if (availableUpdates == 1) message = "There is " + std::to_string(availableUpdates) + " update available to be installed"; else message = "There are " + std::to_string(availableUpdates) + " updates available to be installed"; std::string urgency = "normal"; if (!m_mgtClient->raiseAlert(key,message,urgency)) { m_logger->error("Failed to raise an alert for key=%s,message=%s,urgency=%s", key.c_str(), message.c_str(), urgency.c_str()); } } } catch (...) 
{ try { std::exception_ptr p = std::current_exception(); std::rethrow_exception(p); } catch(const std::exception& e) { m_logger->error("Failed to raise alert : %s", e.what()); } } } /** * Logs process end message */ void CheckUpdates::processEnd() { m_logger->debug("raiseAlerts completed"); } /** * Fetch package manager name */ std::string CheckUpdates::getPackageManager() { std::string command = "command -v yum || command -v apt-get"; std::string result = ""; char buffer[128]; // Open pipe to file FILE* pipe = popen(command.c_str(), "r"); if (!pipe) { m_logger->error("getPackageManager: popen call failed : %s",strerror(errno)); return ""; } // read till end of process: while (!feof(pipe)) { if (fgets(buffer, 128, pipe) != NULL) result += buffer; } pclose(pipe); if (result.find("apt") != std::string::npos) return "apt"; if (result.find("yum") != std::string::npos) return "yum"; m_logger->warn("Unspported environment %s", result.c_str() ); return ""; } /** * Fetch a list of all the package name for which upgrade is available */ std::vector<std::string> CheckUpdates::getUpgradablePackageList() { std::string packageManager = getPackageManager(); std::vector<std::string> packageList; if(!packageManager.empty()) { std::string command = "(sudo apt update && sudo apt list --upgradeable) 2>/dev/null | grep -v '^fledge-manage' | grep '^fledge' | tr -s ' ' | cut -d' ' -f-1,2 "; if (packageManager.find("yum") != std::string::npos) { command = "(sudo yum check-update && sudo yum list updates) 2>/dev/null | grep -v '^fledge-manage' | grep '^fledge' | tr -s ' ' | cut -d' ' -f-1,2 "; } FILE* pipe = popen(command.c_str(), "r"); if (!pipe) { m_logger->error("getUpgradablePackageList: popen call failed : %s",strerror(errno)); return packageList; } char buffer[1024]; while (!feof(pipe)) { if (fgets(buffer, sizeof(buffer), pipe) != NULL) { //strip out newline character int len = strlen(buffer) - 1; if (*buffer && buffer[len] == '\n') buffer[len] = '\0'; 
packageList.emplace_back(buffer); } } pclose(pipe); } return packageList; } ================================================ FILE: C/tasks/check_updates/include/check_updates.h ================================================ #ifndef _CHECK_UPDATES_H #define _CHECK_UPDATES_H /* * Fledge Check Updates * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Devki Nandan Ghildiyal */ #include <process.h> #define LOG_NAME "check_updates" /** * CheckUpdates class */ class CheckUpdates : public FledgeProcess { public: CheckUpdates(int argc, char** argv); ~CheckUpdates(); void run(); private: Logger *m_logger; ManagementClient *m_mgtClient; void raiseAlerts(); std::string getPackageManager(); std::vector<std::string> getUpgradablePackageList(); void processEnd(); }; #endif ================================================ FILE: C/tasks/check_updates/main.cpp ================================================ /* * Fledge Check Updates * * Copyright (c) 2024 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Devki Nandan Ghildiyal */ #include <check_updates.h> #include <logger.h> using namespace std; int main(int argc, char** argv) { try { CheckUpdates check(argc, argv); check.run(); } catch (...) 
{ try { std::exception_ptr p = std::current_exception(); std::rethrow_exception(p); } catch(const std::exception& e) { Logger::getLogger()->error("An error occurred during the execution : %s", e.what()); } exit(1); } // Return success exit(0); } ================================================ FILE: C/tasks/north/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (Fledge_tasks_north) set(CMAKE_CXX_FLAGS "-std=c++11 -O3") add_subdirectory(sending_process) ================================================ FILE: C/tasks/north/sending_process/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (sending_process) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(DLLIB -ldl) set(UUIDLIB -luuid) set(COMMON_LIB common-lib) set(SERVICE_COMMON_LIB services-common-lib) set(PLUGINS_COMMON_LIB plugins-common-lib) include_directories(. 
include ../../../thirdparty/Simple-Web-Server ../../../thirdparty/rapidjson/include ../../../common/include ../../../services/common/include ../../../plugins/common/include) find_package(Threads REQUIRED) set(BOOST_COMPONENTS system thread) # Late 2017 TODO: remove the following checks and always use std::regex if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU") if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9) set(BOOST_COMPONENTS ${BOOST_COMPONENTS} regex) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DUSE_BOOST_REGEX") endif() endif() find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) if(APPLE) set(OPENSSL_ROOT_DIR "/usr/local/opt/openssl") endif() file(GLOB sending_process_src "*.cpp") link_directories(${PROJECT_BINARY_DIR}/../../../lib) add_executable(sending_process ${sending_process_src}) target_link_libraries(sending_process ${Boost_LIBRARIES}) target_link_libraries(sending_process ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(sending_process ${DLLIB}) target_link_libraries(sending_process ${UUIDLIB}) target_link_libraries(sending_process -lssl -lcrypto) target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${SERVICE_COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${PLUGINS_COMMON_LIB}) install(TARGETS sending_process RUNTIME DESTINATION fledge/tasks) if(MSYS) #TODO: Is MSYS true when MSVC is true? 
target_link_libraries(sending_process ws2_32 wsock32) if(OPENSSL_FOUND) target_link_libraries(sending_process ws2_32 wsock32) endif() endif() ================================================ FILE: C/tasks/north/sending_process/include/north_filter_pipeline.h ================================================ #ifndef _NORTH_FILTER_PIPELINE_H #define _NORTH_FILTER_PIPELINE_H /* * Fledge filter pipeline class for sending process * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <filter_pipeline.h> /** * The NorthFilterPipeline class is derived from FilterPipeline class and * is used to represent a pipeline of filter applicable to sending process. * Methods are provided to load filters, setup filtering pipeline and for * pipeline/filters cleanup. */ class NorthFilterPipeline : public FilterPipeline { public: NorthFilterPipeline(ManagementClient* mgtClient, StorageClient& storage, std::string serviceName); ~NorthFilterPipeline() {} // Setup the filter pipeline bool setupFiltersPipeline(void *passToOnwardFilter, void *useFilteredData, void *sendingProcess); }; #endif ================================================ FILE: C/tasks/north/sending_process/include/north_plugin.h ================================================ #ifndef _NORTH_PLUGIN #define _NORTH_PLUGIN /* * Fledge north plugin. * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <plugin.h> #include <plugin_manager.h> #include <reading.h> #include <config_category.h> #include <plugin_data.h> /** * Class that represents a north plugin. * * The purpose of this class is to hide the use of the pointers into the * dynamically loaded plugin and wrap the interface into a class that * can be used directly in the north subsystem. 
* * This is achieved by having a set of private member variables which are * the pointers to the functions in the plugin, and a set of public methods * that will call these functions via the function pointers. */ class NorthPlugin : public Plugin { public: // Methods NorthPlugin(const PLUGIN_HANDLE handle); ~NorthPlugin(); void shutdown(); std::string shutdownSaveData(); uint32_t send(const std::vector<Reading* >& readings) const; PLUGIN_HANDLE init(const ConfigCategory& config); bool persistData() { return info->options & SP_PERSIST_DATA; }; void start(); void startData(const std::string& pluginData); private: // Function pointers void (*pluginShutdown)(const PLUGIN_HANDLE); std::string (*pluginShutdownData)(const PLUGIN_HANDLE); uint32_t (*pluginSend)(const PLUGIN_HANDLE, const std::vector<Reading* >& readings); PLUGIN_HANDLE (*pluginInit)(const ConfigCategory* config); void (*pluginStart)(PLUGIN_HANDLE); void (*pluginStartData)(PLUGIN_HANDLE, const std::string& pluginData); public: // Persist plugin data PluginData* m_plugin_data; private: // Attributes PLUGIN_HANDLE m_instance; }; #endif ================================================ FILE: C/tasks/north/sending_process/include/sending.h ================================================ #ifndef _SENDING_PROCESS_H #define _SENDING_PROCESS_H /* * Fledge process class * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Massimiliano Pinto */ #include <process.h> #include <thread> #include <north_plugin.h> #include <reading.h> #include <filter_plugin.h> #include <north_filter_pipeline.h> #include <asset_tracking.h> // SendingProcess class class SendingProcess : public FledgeProcess { public: // Constructor: SendingProcess(int argc, char** argv); // Destructor ~SendingProcess(); void run() const; void stop(); int getStreamId() const { return m_stream_id; }; bool isRunning() const { if (m_dryRun) return false; return m_running; }; void stopRunning() { m_running = false; }; 
void setLastFetchId(unsigned long id) { m_last_fetch_id = id; }; unsigned long getLastFetchId() const { return m_last_fetch_id; }; void setLastSentId(unsigned long id) { m_last_sent_id = id; }; unsigned long getLastSentId() const { return m_last_sent_id; }; unsigned long getSentReadings() const { return m_tot_sent; }; bool updateSentReadings(unsigned long num) { m_tot_sent += num; return m_tot_sent; }; void resetSentReadings() { m_tot_sent = 0; }; void updateDatabaseCounters(); bool getLastSentReadingId(); bool createStream(int); int createNewStream(); unsigned int getDuration() const { return m_duration; }; unsigned int getSleepTime() const { return m_sleep; }; bool getUpdateDb() const { return m_update_db; }; bool setUpdateDb(bool val) { m_update_db = val; return m_update_db; }; unsigned long getReadBlockSize() const { return m_block_size; }; const std::string& getDataSourceType() const { return m_data_source_t; }; const std::string& getPluginName() const { return m_plugin_name; }; void setLoadBufferIndex(unsigned long loadBufferIdx); unsigned long getLoadBufferIndex() const; const unsigned long* getLoadBufferIndexPtr() const; unsigned long getMemoryBufferSize() const { return m_memory_buffer_size; }; void createConfigCategories(DefaultConfigCategory configCategory, std::string parent_name, std::string current_name, std::string current_description); // Public static methods public: static void setLoadBufferData(unsigned long index, ReadingSet* readings); static std::vector<ReadingSet *>* getDataBuffers() { return m_buffer_ptr; }; static void useFilteredData(OUTPUT_HANDLE *outHandle, READINGSET* readings); static void passToOnwardFilter(OUTPUT_HANDLE *outHandle, READINGSET* readings); private: std::string retrieveTableInformationName(const char* dataSource); void updateStreamLastSentId(long lastSentId); void setDuration(unsigned int val) { m_duration = val; }; void setSleepTime(unsigned long val) { m_sleep = val; }; void setReadBlockSize(unsigned long size) { 
m_block_size = size; }; bool loadPlugin(const std::string& pluginName); ConfigCategory fetchConfiguration(const std::string& defCfg, const std::string& pluginName); bool loadFilters(const std::string& pluginName); void updateStatistics(std::string& stat_key, const std::string& stat_description); // Make private the copy constructor and operator= SendingProcess(const SendingProcess &); SendingProcess& operator=(SendingProcess const &); public: std::vector<ReadingSet *> m_buffer; std::thread* m_thread_load; std::thread* m_thread_send; NorthPlugin* m_plugin; std::vector<unsigned long> m_last_read_id; NorthFilterPipeline* filterPipeline; private: bool m_running; int m_stream_id; unsigned long m_last_sent_id; unsigned long m_last_fetch_id; unsigned long m_tot_sent; unsigned int m_duration; unsigned long m_sleep; unsigned long m_block_size; bool m_update_db; std::string m_plugin_name; Logger* m_logger; std::string m_data_source_t; unsigned long m_load_buffer_index; unsigned long m_memory_buffer_size = 1; // static pointer for data buffer access static std::vector<ReadingSet *>* m_buffer_ptr; AssetTracker *m_assetTracker; }; #endif ================================================ FILE: C/tasks/north/sending_process/north_filter_pipeline.cpp ================================================ /* * Fledge filter pipeline class for sending process * * Copyright (c) 2019 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Amandeep Singh Arora */ #include <north_filter_pipeline.h> #include <sending.h> #define JSON_CONFIG_FILTER_ELEM "filter" #define JSON_CONFIG_PIPELINE_ELEM "pipeline" using namespace std; /** * NorthFilterPipeline class constructor * * This class abstracts the filter pipeline interface for sending process * * @param mgtClient Management client handle * @param storage Storage client handle * @param serviceName Name of the service to which this pipeline applies */ NorthFilterPipeline::NorthFilterPipeline(ManagementClient* mgtClient, 
StorageClient& storage, string serviceName) : FilterPipeline(mgtClient, storage, serviceName) { } /** * Set the filter pipeline for sending process * * This method calls the the method "plugin_init" for all loadad filters. * Up to date filter configurations and Ingest filtering methods * are passed to "plugin_init" * * @param passToOnwardFilter Ptr to function that passes data to next filter * @param useFilteredData Ptr to function that gets final filtered data * @param _sendingProcess The SendingProcess class handle * @return True on success, * False otherwise. * @thown Any caught exception */ bool NorthFilterPipeline::setupFiltersPipeline(void *passToOnwardFilter, void *useFilteredData, void *_sendingProcess) { bool initErrors = false; string errMsg = "'plugin_init' failed for filter '"; for (auto it = m_filters.begin(); it != m_filters.end(); ++it) { if ((*it)->isBranch()) { PipelineBranch *branch = (PipelineBranch *)(*it); branch->setFunctions(passToOnwardFilter, useFilteredData, _sendingProcess); } (*it)->setup(mgtClient, _sendingProcess, m_filterCategories); // Iterate the load filters set in the Ingest class m_filters member if ((it + 1) != m_filters.end()) { // Set next filter pointer as OUTPUT_HANDLE if (!(*it)->init((OUTPUT_HANDLE *)(*(it + 1)), filterReadingSetFn(passToOnwardFilter))) { errMsg += (*it)->getName() + "'"; initErrors = true; break; } } else { // Set load buffer index pointer as OUTPUT_HANDLE SendingProcess *sendingProcess = (SendingProcess *) _sendingProcess; const unsigned long* bufferIndex = sendingProcess->getLoadBufferIndexPtr(); // Set the Ingest class pointer as OUTPUT_HANDLE if (!(*it)->init((OUTPUT_HANDLE *)(bufferIndex), filterReadingSetFn(useFilteredData))) { errMsg += (*it)->getName() + "'"; initErrors = true; break; } } } if (initErrors) { // Failure Logger::getLogger()->fatal("%s error: %s", __FUNCTION__, errMsg.c_str()); return false; } // Set filter pipeline is ready for data ingest m_ready = true; //Success return true; } 
================================================
FILE: C/tasks/north/sending_process/north_plugin.cpp
================================================
/*
 * Fledge north plugin
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <north_plugin.h>
#include <iostream>

using namespace std;

/**
 * Constructor for the class that wraps the OMF north plugin
 *
 * Create a set of function pointers.
 * @param handle	The loaded plugin handle
 */
NorthPlugin::NorthPlugin(const PLUGIN_HANDLE handle) : Plugin(handle)
{
	// Setup the function pointers to the plugin
	pluginInit = (PLUGIN_HANDLE (*)(const ConfigCategory* config))
			manager->resolveSymbol(handle, "plugin_init");
	pluginShutdown = (void (*)(const PLUGIN_HANDLE))
			manager->resolveSymbol(handle, "plugin_shutdown");
	// Same "plugin_shutdown" symbol, but cast for plugins that
	// return persisted data as a string on shutdown
	pluginShutdownData = (string (*)(const PLUGIN_HANDLE))
			manager->resolveSymbol(handle, "plugin_shutdown");
	pluginSend = (uint32_t (*)(const PLUGIN_HANDLE, const vector<Reading* >& readings))
			manager->resolveSymbol(handle, "plugin_send");
	pluginStart = (void (*)(const PLUGIN_HANDLE))
			manager->resolveSymbol(handle, "plugin_start");
	// Same "plugin_start" symbol, but cast for plugins that
	// accept previously persisted data on start
	pluginStartData = (void (*)(const PLUGIN_HANDLE, const string& storedData))
			manager->resolveSymbol(handle, "plugin_start");

	// Persist data initialised
	m_plugin_data = NULL;
}

// Destructor
NorthPlugin::~NorthPlugin()
{
	delete m_plugin_data;
}

/**
 * Initialise the plugin with configuration data
 *
 * @param config	The configuration data
 * @return		The plugin handle
 */
PLUGIN_HANDLE NorthPlugin::init(const ConfigCategory& config)
{
	// Pass input data pointer
	m_instance = this->pluginInit(&config);
	return &m_instance;
}

/**
 * Call the start method in the plugin
 * with no persisted data
 */
void NorthPlugin::start()
{
	// Check pluginStart function pointer exists
	if (this->pluginStart)
	{
		this->pluginStart(m_instance);
	}
}

/**
 * Call the start method in the plugin
 * passing persisted data
 */
void NorthPlugin::startData(const string& storedData)
{
	// Check pluginStartData function pointer exists
	if (this->pluginStartData)
	{
		this->pluginStartData(m_instance, storedData);
	}
}

/**
 * Send vector (by reference) of readings pointer to historian server
 *
 * @param readings	The readings data
 * @return		The readings sent or 0 in case of any error
 */
uint32_t NorthPlugin::send(const vector<Reading* >& readings) const
{
	return this->pluginSend(m_instance, readings);
}

/**
 * Call the shutdown method in the plugin
 */
void NorthPlugin::shutdown()
{
	// Check pluginShutdown function pointer exists
	if (this->pluginShutdown)
	{
		return this->pluginShutdown(m_instance);
	}
}

/**
 * Call the shutdown method in the plugin
 * and return plugin data to persist as JSON string
 */
string NorthPlugin::shutdownSaveData()
{
	string ret("");
	// Check pluginShutdownData function pointer exists
	if (this->pluginShutdownData)
	{
		ret = this->pluginShutdownData(m_instance);
	}
	return ret;
}

================================================
FILE: C/tasks/north/sending_process/sending.cpp
================================================
/*
 * Fledge process class
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <sending.h>
#include <csignal>
#include <sys/prctl.h>
#include <filter_plugin.h>
#include <map>

#define VERBOSE_LOG	0

#define PLUGIN_UNDEFINED ""

// The type of the plugin managed by the Sending Process
#define PLUGIN_TYPE "north"

#define GLOBAL_CONFIG_KEY "GLOBAL_CONFIGURATION"
#define PLUGIN_CONFIG_KEY "PLUGIN"
#define PLUGIN_TYPES_KEY "OMF_TYPES"

// Configuration retrieved from the Configuration Manager
#define CONFIG_CATEGORY_DESCRIPTION "Configuration of the Sending Process"
#define CATEGORY_OMF_TYPES_DESCRIPTION "Configuration of OMF types"

// Used for the handling of the hierarchical configuration structure
#define PARENT_CONFIGURATION_KEY "North"

using namespace std;

// Default values for the creation of a new stream,
// the description is
// derived from the parameter --name
#define NEW_STREAM_LAST_OBJECT 0

// Data sources handled by the sending process
#define DATA_SOURCE_READINGS   "readings"
#define DATA_SOURCE_STATISTICS "statistics"
#define DATA_SOURCE_AUDIT      "audit"

// Indexes into the per-data-source information tuple below
#define DATA_SOURCE_INFORMATION_TABLE_NAME 0
#define DATA_SOURCE_INFORMATION_STAT_KEY   1
#define DATA_SOURCE_INFORMATION_STAT_DESCR 2

// Translation from the data source type to data source information
const map<string, std::tuple<string, string, string>> data_source_to_information = {
	// Data source - TableName - Statistics key - Statistics description
	{DATA_SOURCE_READINGS,   std::make_tuple("readings",   "Readings Sent",   "Readings Sent North")},
	{DATA_SOURCE_STATISTICS, std::make_tuple("statistics", "Statistics Sent", "Statistics Sent North")},
	{DATA_SOURCE_AUDIT,      std::make_tuple("audit",      "Audit Sent",      "Audit Sent North")}
};

// static pointer to data buffers for filter plugins
std::vector<ReadingSet*>* SendingProcess::m_buffer_ptr = 0;

// Used to identify logs
const string LOG_SERVICE_NAME = "SendingProcess/sending";

static map<string, string> globalConfiguration = {};

// Sending process default configuration
static const string sendingDefaultConfig = QUOTE({
	"enable": {
		"description": "A switch that can be used to enable or disable execution of the sending process.",
		"type": "boolean",
		"default": "true",
		"readonly": "true"
	},
	"streamId": {
		"description": "Identifies the specific stream to handle and the related information, among them the ID of the last object streamed.",
		"type": "integer",
		"default": "0",
		"readonly": "true"
	}
});

// Sending process advanced configuration
static const string sendingAdvancedConfig = QUOTE({
	"duration": {
		"description": "How long the sending process should run (in seconds) before stopping.",
		"type": "integer",
		"default": "60",
		"order": "30",
		"displayName" : "Duration"
	},
	"blockSize": {
		"description": "The size of a block of readings to send in each transmission.",
		"type": "integer",
		"default": "500",
		"order": "31",
		"displayName" : "Readings Block Size"
	},
	"sleepInterval": {
		"description": "A period of time, expressed in seconds, to wait between attempts to send readings when there are no readings to be sent.",
		"type": "integer",
		"default": "1",
		"order": "32",
		"displayName" : "Sleep Interval"
	},
	"memoryBufferSize": {
		"description": "Number of elements of blockSize size to be buffered in memory",
		"type": "integer",
		"default": "10",
		"order": "33",
		"displayName" : "Memory Buffer Size",
		"readonly": "false"
	},
	"logLevel" : {
		"description" : "Minimum level of message logged",
		"type" : "enumeration",
		"options" : [ "error", "warning", "info", "debug" ],
		"displayName" : "Log Level",
		"default" : "warning",
		"order" : "40"
	}
});

// Last signal delivered to the process; polled by run()
volatile std::sig_atomic_t signalReceived = 0;

// Handle Signals
static void signalHandler(int signal)
{
	signalReceived = signal;
}

/**
 * SendingProcess class methods
 */

// Destructor
SendingProcess::~SendingProcess()
{
	delete m_thread_load;
	delete m_thread_send;
	delete m_plugin;
}

// SendingProcess Class Constructor
SendingProcess::SendingProcess(int argc, char** argv) : FledgeProcess(argc, argv)
{
	m_logger = Logger::getLogger();

	// the stream_id to use is retrieved from the configuration
	m_stream_id = -1;
	m_plugin_name = PLUGIN_UNDEFINED;

#if VERBOSE_LOG
	int i;
	for (i = 0; i < argc; i++)
	{
		m_logger->debug("%s - param :%d: :%s:",
				LOG_SERVICE_NAME.c_str(),
				i,
				argv[i]);
	}
#endif
	// Mark running state
	m_running = true;

	// NorthPlugin
	m_plugin = NULL;

	// Set vars & counters to 0, false
	m_last_sent_id = 0;
	m_tot_sent = 0;
	m_update_db = false;

	Logger::getLogger()->info("SendingProcess is starting");

	/**
	 * Get Configuration from sending process and loaded plugin
	 * Create or update configuration via Fledge API
	 */

	// Reads the sending process configuration
	ConfigCategory processDefault = this->fetchConfiguration(sendingDefaultConfig,
								 PLUGIN_UNDEFINED);

	// The allocation should be done after fetchConfiguration
	// as the value for m_memory_buffer_size is
retrieved from the configuration // // Set buffer of ReadingSet with NULLs m_buffer.resize(m_memory_buffer_size, NULL); // Initialise buffer last read id m_last_read_id.resize(m_memory_buffer_size, 0); // Set the static pointer m_buffer_ptr = &m_buffer; if (m_plugin_name == PLUGIN_UNDEFINED) { // Ends the execution if the plug-in is not defined string errMsg(LOG_SERVICE_NAME + \ " - the plugin-in is not defined " "for the sending process :" + this->getName() + " :."); m_logger->fatal(errMsg); throw runtime_error(errMsg); } // Loads the plug-in if (!loadPlugin(string(m_plugin_name))) { string errMsg("SendingProcess: failed to load north plugin '"); errMsg.append(m_plugin_name); errMsg += "'."; Logger::getLogger()->fatal(errMsg); throw runtime_error(errMsg); } // Read now the sending process configuration merged with the one // related to the loaded plugin ConfigCategory config = this->fetchConfiguration(sendingDefaultConfig, m_plugin_name); #if VERBOSE_LOG m_logger->debug("%s - stream-id :%d:", LOG_SERVICE_NAME.c_str(), m_stream_id); #endif // Checks if stream-id is undefined, it allocates a new one in the case if (m_stream_id == 0) { m_logger->info("%s - stream-id is undefined, allocating a new one.", LOG_SERVICE_NAME.c_str()); m_stream_id = this->createNewStream(); if (m_stream_id == 0) { string errMsg(LOG_SERVICE_NAME + " - it is not possible to create a new stream."); m_logger->fatal(errMsg); throw runtime_error(errMsg); } else { m_logger->info("%s - new stream-id allocated :%d:", LOG_SERVICE_NAME.c_str(), m_stream_id); const string categoryName = this->getName(); const string itemName = "streamId"; const string itemValue = to_string(m_stream_id); // Prepares the error message in case of an error string errMsg(LOG_SERVICE_NAME + \ " - it is not possible to update the item :" + \ itemName + " : of the category :" + categoryName + ":"); try { this->getManagementClient()->setCategoryItemValue(categoryName, itemName, itemValue); m_logger->info("%s - configuration 
updated, using stream-id :%d:", LOG_SERVICE_NAME.c_str(), m_stream_id); } catch (std::exception* e) { delete e; m_logger->error(errMsg); throw runtime_error(errMsg); } catch (...) { m_logger->fatal(errMsg); throw runtime_error(errMsg); } } } // Init plugin with merged configuration from Fledge API this->m_plugin->init(config); if(m_dryRun) { return; } if (this->m_plugin->m_plugin_data) { // If plugin has SP_PERSIST_DATA: // 1 - load plugin stored data from storage: key is taskName + pluginName string storedData = this->m_plugin->m_plugin_data->loadStoredData(this->getName() + m_plugin_name); // 2 - call 'plugin_start' with plugin data: startData() m_plugin->startData(storedData); } else { // Call 'plugin_start' without parameters: start() m_plugin->start(); } // Fetch last_object sent from fledge.streams if (!this->getLastSentReadingId()) { m_logger->warn(LOG_SERVICE_NAME + " - Last object id for stream '" + to_string(m_stream_id) + "' NOT found, creating a new stream."); if (!this->createStream(m_stream_id)) { string errMsg(LOG_SERVICE_NAME + " - It is not possible to create a new stream for streamId :" + to_string(m_stream_id) + ":."); m_logger->fatal(errMsg); throw runtime_error(errMsg); } else { m_logger->info(LOG_SERVICE_NAME + " - streamId :" + to_string(m_stream_id) + ": created."); } } #if VERBOSE_LOG Logger::getLogger()->info("SendingProcess initialised with %d data buffers.", m_memory_buffer_size); Logger::getLogger()->info("SendingProcess data source type is '%s'", this->getDataSourceType().c_str()); Logger::getLogger()->info("SendingProcess reads data from last id %lu", this->getLastSentId()); #endif filterPipeline = NULL; m_assetTracker = new AssetTracker(getManagementClient(), getName()); AssetTracker::getAssetTracker()->populateAssetTrackingCache(getName(), "Egress"); // Load filter plugins if (!this->loadFilters(this->getName())) { Logger::getLogger()->fatal("SendingProcess failed loading filter plugins. 
Exiting"); throw runtime_error(LOG_SERVICE_NAME + " failure while loading filter plugins."); } } // While running check signals and execution time void SendingProcess::run() const { // Requests the kernel to deliver SIGHUP when parent dies prctl(PR_SET_PDEATHSIG, SIGHUP); // We handle these signals, add more if needed std::signal(SIGHUP, signalHandler); std::signal(SIGINT, signalHandler); std::signal(SIGSTOP, signalHandler); std::signal(SIGTERM, signalHandler); std::signal(SIGABRT, signalHandler); // Catches the Fledge kill command // Check running time time_t elapsedSeconds = 0; while (elapsedSeconds < (time_t)m_duration) { // Check whether a signal has been received if (signalReceived != 0) { Logger::getLogger()->info("SendingProcess is stopping due to caught signal %d (%s)", signalReceived, strsignal(signalReceived), elapsedSeconds); break; } // Just sleep sleep(m_sleep); if (m_dryRun) // We do this here to allow the threads time to setup { break; } elapsedSeconds = time(NULL) - this->getStartTime(); } Logger::getLogger()->info("SendingProcess is stopping, after %d seconds.", elapsedSeconds); } /** * Load the Historian specific 'transform & send data' plugin * * @param pluginName The plugin to load * @return true if loded, false otherwise */ bool SendingProcess::loadPlugin(const string& pluginName) { PluginManager *manager = PluginManager::getInstance(); if (pluginName.empty()) { Logger::getLogger()->error("Unable to fetch north plugin " "'%s' from configuration.", pluginName.c_str()); return false; } Logger::getLogger()->info("Load north plugin '%s'.", pluginName.c_str()); PLUGIN_HANDLE handle; if ((handle = manager->loadPlugin(pluginName, PLUGIN_TYPE_NORTH)) != NULL) { #if VERBOSE_LOG Logger::getLogger()->info("Loaded north plugin '%s'.", pluginName.c_str()); #endif m_plugin = new NorthPlugin(handle); // Check persist data option for plugin. 
		if (m_plugin->persistData())
		{
			// Instantiate PluginData class for persistence of data
			m_plugin->m_plugin_data = new PluginData(this->getStorageClient());
		}
		return true;
	}
	return false;
}

// Stop running threads & cleanup used resources
void SendingProcess::stop()
{
	// End of processing loop for threads
	this->stopRunning();

	// Threads execution has completed.
	this->m_thread_load->join();
	this->m_thread_send->join();

	// Remove the data buffers
	for (unsigned int i = 0; i < m_memory_buffer_size; i++)
	{
		ReadingSet* data = this->m_buffer[i];
		if (data != NULL)
		{
			delete data;
		}
	}

	// Cleanup the plugin resources
	if (this->m_plugin->m_plugin_data)
	{
		// If plugin has SP_PERSIST_DATA option:
		// 1- call shutdownSaveData and get up-to-date plugin data.
		string saveData = this->m_plugin->shutdownSaveData();
		// 2- store returned data: key is taskName + pluginName
		string key(this->getName() + m_plugin_name);
		if (!this->m_plugin->m_plugin_data->persistPluginData(key, saveData, this->getName()))
		{
			Logger::getLogger()->error("Plugin %s has failed to save data [%s] for key %s and task name %s",
						   m_plugin_name.c_str(),
						   saveData.c_str(),
						   key.c_str(),
						   this->getName().c_str());
		}
	}
	else
	{
		// No data to save
		this->m_plugin->shutdown();
	}

	// Cleanup filters
	if (filterPipeline)
	{
		filterPipeline->cleanupFilters(getName());
		delete filterPipeline;
	}

	Logger::getLogger()->info("SendingProcess successfully terminated");
}

/**
 * Sets the position of the readings table the sending process
 * has already sent
 *
 * @param lastSentId	Id of the readings table already sent
 */
void SendingProcess::updateStreamLastSentId(long lastSentId)
{
	string streamId = to_string(this->getStreamId());

	// Prepare WHERE id = val
	const Condition conditionStream(Equals);
	Where wStreamId("id", conditionStream, streamId);

	// Prepare last_object = value
	InsertValues lastId;
	lastId.push_back(InsertValue("last_object", lastSentId));

	// Perform UPDATE fledge.streams SET last_object = x WHERE id = y
	this->getStorageClient()->updateTable("streams", lastId, wStreamId);
}

/**
 * Update database tables statistics and streams
 * setting last_object id in streams
 */
void SendingProcess::updateDatabaseCounters()
{
	updateStreamLastSentId((long)this->getLastSentId());

	// Updates 'Master' statistic
	string stat_key;
	string stat_description;

	// Identifies the statistics that should be updated in relation to the data source
	auto item = data_source_to_information.find(m_data_source_t);
	if (item != data_source_to_information.end())
	{
		stat_key = std::get<DATA_SOURCE_INFORMATION_STAT_KEY>(item->second);
		stat_description = std::get<DATA_SOURCE_INFORMATION_STAT_DESCR>(item->second);
	}
	this->updateStatistics(stat_key, stat_description);

	// Updates 'stream' specific statistic
	stat_key = this->getName();
	stat_description = stat_key;

	this->updateStatistics(stat_key, stat_description);
}

/**
 * Update database tables statistics
 * numReadings sent in statistics
 * it either updates the specific row if it is already available
 * or add the new row
 */
void SendingProcess::updateStatistics(string& stat_key, const string& stat_description)
{
	if (stat_key.empty())
	{
		Logger::getLogger()->error("It is not possible to update the statistics as the data source is unknown, data source -%s-", m_data_source_t.c_str());
	}
	else
	{
		// Prepare WHERE key = name
		const Condition conditionStat(Equals);
		Where wLastStat("key", conditionStat, stat_key);

		// Prepare value = value + inc
		ExpressionValues updateValue;
		updateValue.push_back(Expression("value", "+", (int)this->getSentReadings()));

		// Perform UPDATE fledge.statistics SET value = value + x WHERE key = 'name'
		int row_affected = this->getStorageClient()->updateTable("statistics",
									 updateValue,
									 wLastStat);
		if (row_affected == -1)
		{
			// The required row is not in the statistics table yet
			// this situation happens only at the initial setup
			// adding the required row.
			Logger::getLogger()->info("Adding a new row into the statistics as it is not present yet, key -%s- description -%s-",
						  stat_key.c_str(),
						  stat_description.c_str());
			InsertValues values;
			values.push_back(InsertValue("key", stat_key));
			values.push_back(InsertValue("description", stat_description));
			values.push_back(InsertValue("value", (int)this->getSentReadings()));
			string table = "statistics";

			if (getStorageClient()->insertTable(table, values) != 1)
			{
				getLogger()->error("Failed to insert a new row into the %s", table.c_str());
			}
			else
			{
				Logger::getLogger()->info("New row added into the %s, key -%s- description -%s-",
							  table.c_str(),
							  stat_key.c_str(),
							  stat_description.c_str());
			}
		}
	}
}

/**
 * Retrieves the name table of the data source
 *
 * @param dataSource	datasource for which the table name should be identified
 * @return		table name
 */
string SendingProcess::retrieveTableInformationName(const char* dataSource)
{
	string tableInfo;

	// Identifies table name
	auto item = data_source_to_information.find(dataSource);
	if (item != data_source_to_information.end())
	{
		tableInfo = std::get<DATA_SOURCE_INFORMATION_TABLE_NAME>(item->second);
	}

	return(tableInfo);
}

/**
 * Get last_object id sent for current stream_id
 * Access fledge.streams table.
 *
 * @return true if last_object is found, false otherwise
 */
bool SendingProcess::getLastSentReadingId()
{
	// Fetch last_object sent from fledge.streams
	bool foundId = false;
	const Condition conditionId(Equals);
	string streamId = to_string(this->getStreamId());
	Where* wStreamId = new Where("id", conditionId, streamId);

	// SELECT * FROM fledge.streams WHERE id = x
	Query qLastId(wStreamId);

	ResultSet* lastObjectId = this->getStorageClient()->queryTable("streams", qLastId);

	if (lastObjectId != NULL && lastObjectId->rowCount())
	{
		// Get the first row only
		ResultSet::RowIterator it = lastObjectId->firstRow();
		// Access the element
		ResultSet::Row* row = *it;
		if (row)
		{
			// Get column value
			ResultSet::ColumnValue* theVal = row->getColumn("last_object");
			// Set found id
			this->setLastSentId((unsigned long)theVal->getInteger());

			foundId = true;
		}
	}
	// Free result set
	delete lastObjectId;

	return foundId;
}

/**
 * Creates a new stream, it adds a new row into the streams table allocating a new stream id
 *
 * @return newly created stream, 0 otherwise
 */
int SendingProcess::createNewStream()
{
	int streamId = 0;

	InsertValues streamValues;
	streamValues.push_back(InsertValue("description", this->getName()));
	streamValues.push_back(InsertValue("last_object", NEW_STREAM_LAST_OBJECT));

	if (getStorageClient()->insertTable("streams", streamValues) != 1)
	{
		getLogger()->error("Failed to insert a row into the streams table");
	}
	else
	{
		// Select the row just created, having description='process name'
		const Condition conditionId(Equals);
		string name = getName();
		Where* wName = new Where("description", conditionId, name);
		Query qName(wName);

		ResultSet* rows = this->getStorageClient()->queryTable("streams", qName);

		if (rows != NULL && rows->rowCount())
		{
			// Get the first row only
			ResultSet::RowIterator it = rows->firstRow();
			// Access the element
			ResultSet::Row* row = *it;
			if (row)
			{
				// Get column value
				ResultSet::ColumnValue* theVal = row->getColumn("id");
				streamId = (int)theVal->getInteger();
			}
		}
		delete rows;
	}

	return streamId;
}

/**
 * Creates a new stream, it adds a new row into the streams table allocating specific stream id
 *
 * @return true if successful created, false otherwise
 */
bool SendingProcess::createStream(int streamId)
{
	bool created = false;

	InsertValues streamValues;
	streamValues.push_back(InsertValue("id", streamId));
	streamValues.push_back(InsertValue("description", this->getName()));
	streamValues.push_back(InsertValue("last_object", NEW_STREAM_LAST_OBJECT));

	if (getStorageClient()->insertTable("streams", streamValues) != 1)
	{
		getLogger()->error("Failed to insert a row into the streams table for the streamId :%d:",
				   streamId);
	}
	else
	{
		created = true;
		// Set initial last_object
		this->setLastSentId((unsigned long) NEW_STREAM_LAST_OBJECT);
	}

	return created;
}

/**
 * Creates config categories and sub categories recursively, along with their parent-child relations
 */
void SendingProcess::createConfigCategories(DefaultConfigCategory configCategory, std::string parent_name, std::string current_name, std::string current_description)
{
	// Deal with registering and fetching the configuration
	DefaultConfigCategory defConfig(configCategory);
	defConfig.setDescription(current_description);

	// Split the category: keep only sub-category items in one copy,
	// strip them from the other
	DefaultConfigCategory defConfigCategoryOnly(defConfig);
	defConfigCategoryOnly.keepItemsType(ConfigCategory::ItemType::CategoryType);
	defConfig.removeItemsType(ConfigCategory::ItemType::CategoryType);

	// Create/Update category name (we pass keep_original_items=true)
	if (!this->getManagementClient()->addCategory(defConfig, true))
	{
		string errMsg = string("Failure creating/updating configuration key '").append(current_name).append("'");
		Logger::getLogger()->fatal(errMsg);
		throw runtime_error(errMsg);
	}

	// Add parent-child relationship
	vector<string> children;
	children.push_back(current_name);
	this->getManagementClient()->addChildCategories(parent_name, children);

	// Adds sub categories to the configuration
	bool extracted = true;
	ConfigCategory subCategory;
	while (extracted)
	{
		extracted = subCategory.extractSubcategory(defConfigCategoryOnly);
		if (extracted)
		{
			DefaultConfigCategory defSubCategory(subCategory);

			// Recurse into the sub category
			createConfigCategories(defSubCategory, current_name, subCategory.getName(), subCategory.getDescription());
			// Cleans the category
			subCategory.removeItems();
			subCategory = ConfigCategory();
		}
	}
}

/**
 * Create or Update the sending process configuration
 * by accessing Fledge rest API service
 *
 * SendingProcess + plugin DEFAULT configuration is passed to
 * configuration manager and a merged one with "value" and "default"
 * is returned.
 *
 * Return to caller the configuration items as a ConfigCategory object
 *
 * @param defaultConfig	Sending Process default configuration
 * @param plugin_name	The plugin name: if not set yet
 *			passed value is PLUGIN_UNDEFINED
 * @return		The configuration category with Sending Process defaults
 *			and plugin defaults
 * @throw		runtime_error
 */
ConfigCategory SendingProcess::fetchConfiguration(const std::string& defaultConfig,
						  const std::string& plugin_name)
{
	// retrieves the configuration using the value of the --name parameter
	// (received in the command line) as the key
	string categoryName(this->getName());
#if VERBOSE_LOG
	Logger::getLogger()->debug("%s - catName :%s:", LOG_SERVICE_NAME.c_str(), categoryName.c_str());
#endif

	ConfigCategory configuration;
	ConfigCategory advancedConfiguration;
	try
	{
		// Create category, with "default" values only
		DefaultConfigCategory category(categoryName, defaultConfig);
		category.setDescription(CONFIG_CATEGORY_DESCRIPTION);

		// Build JSON merged configuration (sendingProcess + pluginConfig)
		if (plugin_name != PLUGIN_UNDEFINED)
		{
			// Get plugin default config via API method "plugin_info"
			const PLUGIN_INFORMATION *info = this->m_plugin->getInfo();
			DefaultConfigCategory pluginInfo(categoryName, info->config);

			// Copy all pluginInfo items into current sendingProcess config
			category += pluginInfo;
		}

		// Create/Update hierarchical configuration categories
		createConfigCategories(category, PARENT_CONFIGURATION_KEY, categoryName, CONFIG_CATEGORY_DESCRIPTION);

		// Create advanced configuration category
		string advancedCatName = categoryName + string("Advanced");
		DefaultConfigCategory defConfigAdvanced(advancedCatName, sendingAdvancedConfig);

		// Set/Update advanced configuration category
		this->getManagementClient()->addCategory(defConfigAdvanced, true);

		// Set advanced configuration category as child of parent categoryName
		vector<string> children1;
		children1.push_back(advancedCatName);
		this->getManagementClient()->addChildCategories(categoryName, children1);

		// Get the category with values and defaults
		configuration = this->getManagementClient()->getCategory(categoryName);

		// Get the advanced category with values and defaults
		advancedConfiguration = this->getManagementClient()->getCategory(advancedCatName);

		/**
		 * Handle the sending process parameters here:
		 * fetch the Advanced configuration
		 */
		string blockSize = advancedConfiguration.getValue("blockSize");
		string duration = advancedConfiguration.getValue("duration");
		string sleepInterval = advancedConfiguration.getValue("sleepInterval");
		string memoryBufferSize = advancedConfiguration.getValue("memoryBufferSize");
		string minLevel = advancedConfiguration.getValue("logLevel");
		Logger::getLogger()->setMinLevel(minLevel);

		// Handles the case in which the stream_id is not defined
		// in the configuration and sets it to not defined (0)
		string streamId = "";
		try
		{
			streamId = configuration.getValue("streamId");
		}
		catch (std::exception* e)
		{
			delete e;
			streamId = "0";
		}
		catch (...)
		{
			streamId = "0";
		}

		// sets to undefined if not defined in the configuration
		try
		{
			m_plugin_name = configuration.getValue("plugin");
		}
		catch (std::exception* e)
		{
			delete e;
			m_plugin_name = PLUGIN_UNDEFINED;
		}
		catch (...)
		{
			m_plugin_name = PLUGIN_UNDEFINED;
		}

		/**
		 * Set member variables
		 */
		m_block_size = strtoul(blockSize.c_str(), NULL, 10);
		m_sleep = strtoul(sleepInterval.c_str(), NULL, 10);
		m_duration = strtoul(duration.c_str(), NULL, 10);
		m_stream_id = atoi(streamId.c_str());

		// Set the data source type: readings (default) or statistics
		try
		{
			m_data_source_t = configuration.getValue("source");
		}
		catch (...)
		{
			m_data_source_t = "readings";
		}

		// Sets the m_memory_buffer_size = 1 in case of an invalid value
		// from the configuration like for example "A432"
		m_memory_buffer_size = strtoul(memoryBufferSize.c_str(), NULL, 10);
		if (m_memory_buffer_size < 1)
		{
			m_memory_buffer_size = 1;
		}

#if VERBOSE_LOG
		Logger::getLogger()->info("SendingProcess configuration parameters: "
					  "pluginName=%s, source=%s, blockSize=%d, "
					  "duration=%d, sleepInterval=%d, streamId=%d",
					  m_plugin_name.c_str(),
					  m_data_source_t.c_str(),
					  m_block_size,
					  m_duration,
					  m_sleep,
					  m_stream_id);
#endif
		// Return configuration
		return ConfigCategory(configuration);
	}
	catch (std::exception* e)
	{
		// NOTE(review): errors here are swallowed and the (possibly
		// empty) configuration is returned — confirm this best-effort
		// behaviour is intended
		return ConfigCategory(configuration);
	}
	catch (...)
	{
		return ConfigCategory(configuration);
	}
}

/**
 * Load filter plugins for the given configuration
 *
 * @param categoryName	The sending process category name
 * @return		True if filters were loaded and initialised
 *			or there are no filters
 *			False with load/init errors
 */
bool SendingProcess::loadFilters(const string& categoryName)
{
	filterPipeline = new NorthFilterPipeline(this->getManagementClient(), *(this->getStorageClient()), getName());

	// Try to load filters:
	if (!filterPipeline->loadFilters(categoryName))
	{
		// return false on any error
		return false;
	}

	// return true if no filters
	if (filterPipeline->getFilterCount() == 0)
	{
		return true;
	}

	// We have some filters: set up the filter pipeline
	return filterPipeline->setupFiltersPipeline((void *)passToOnwardFilter,
						    (void *)useFilteredData,
						    this);
}

/**
 * Use the current input readings (they have been filtered
 * by all filters)
 *
 * Note:
 * This routine must passed to last filter "plugin_init" only
 *
 * Static method
 *
 * @param outHandle	Pointer to current buffer index
 *			where to add the readings
 * @param readings	Filtered readings to add to buffer[index]
 */
void SendingProcess::useFilteredData(OUTPUT_HANDLE *outHandle, READINGSET *readings)
{
	// Handle the readings set by adding readings set to data buffer[index]
	unsigned long* loadBufferIndex = (unsigned long *)outHandle;
	SendingProcess::getDataBuffers()->at(*loadBufferIndex) = (ReadingSet *)readings;
}

/**
 * Pass the current readings set to the next filter in the pipeline
 *
 * Note:
 * This routine must be passed to all filters "plugin_init" except the last one
 *
 * Static method
 *
 * @param outHandle	Pointer to next filter
 * @param readings	Current readings set
 */
void SendingProcess::passToOnwardFilter(OUTPUT_HANDLE *outHandle, READINGSET *readings)
{
	// Get next filter in the pipeline
	PipelineElement *next = (PipelineElement *)outHandle;

	// Pass readings to next filter
	next->ingest(readings);
}

/**
 * Set the current buffer load index
 *
 * @param loadBufferIndex	The buffer load index the load thread is using
 */
void SendingProcess::setLoadBufferIndex(unsigned long loadBufferIndex)
{
	m_load_buffer_index = loadBufferIndex;
}

/**
 * Get the current buffer load index
 *
 * @return	The buffer load index the load thread is using
 */
unsigned long SendingProcess::getLoadBufferIndex() const
{
	return m_load_buffer_index;
}

/**
 * Get the current buffer load index pointer
 *
 * NOTE:
 * this routine must be called only to pass the index pointer
 * to the last filter in the pipeline for the readings set.
 *
 * @return	The pointer to the buffer load index being used by the load thread
 */
const unsigned long* SendingProcess::getLoadBufferIndexPtr() const
{
	return &m_load_buffer_index;
}

================================================
FILE: C/tasks/north/sending_process/sending_process.cpp
================================================
/*
 * Fledge process class
 *
 * Copyright (c) 2018 Dianomic Systems
 *
 * Released under the Apache 2.0 Licence
 *
 * Author: Massimiliano Pinto
 */
#include <sending.h>
#include <condition_variable>
#include <reading_set.h>
#include <plugin_manager.h>
#include <plugin_api.h>
#include <plugin.h>

#define VERBOSE_LOG	0

/**
 * The sending process is run according to a schedule in order to send reading data
 * to the historian, e.g.
 * the PI system.
 * Its role is to implement the rules as to what needs to be sent and when,
 * extract the data from the storage subsystem and stream it to the north
 * for sending to the external system.
 * The sending process does not implement the protocol used to send the data,
 * that is devolved to the translation plugin in order to allow for flexibility
 * in the translation process.
 */

#define TASK_FETCH_SLEEP 500
#define TASK_SEND_SLEEP  500
#define TASK_SLEEP_MAX_INCREMENTS 7	// from 0,5 secs to up to 32 secs

using namespace std;
using namespace std::chrono;

// Mutex for m_buffer access
mutex	readMutex;

// Mutex for thread idle time
mutex	waitMutex;

// Block the calling thread until notified to resume.
condition_variable cond_var;

// Buffer max elements
unsigned long memoryBufferSize;

// Exit code:
// 0 = success (some data sent)
// 1 = 100% failure sending data to north server
// 2 = internal errors
int exitCode = 1;

// Used to identify logs
const string LOG_SERVICE_NAME = "SendingProcess/sending_process";

// Load data from storage
static void loadDataThread(SendingProcess *loadData);
// Send data from historian
static void sendDataThread(SendingProcess *sendData);

int main(int argc, char** argv)
{
	try
	{
		// Instantiate SendingProcess class
		SendingProcess sendingProcess(argc, argv);

		if (!sendingProcess.isRunning())
		{
			// Dryrun execution
			exit(0);
		}

		memoryBufferSize = sendingProcess.getMemoryBufferSize();

		// Launch the load thread
		sendingProcess.m_thread_load = new thread(loadDataThread, &sendingProcess);
		// Launch the send thread
		sendingProcess.m_thread_send = new thread(sendDataThread, &sendingProcess);

		// Run: max execution time or caught signals can stop it
		sendingProcess.run();

		// Unlock load & send threads
		cond_var.notify_all();

		// End processing
		sendingProcess.stop();
	}
	catch (const std::exception& e)
	{
		cerr << "Exception in " << argv[0] << " : " << e.what() << endl;

		// Return failure for class instance/configuration etc
		exit(2);
	}
	// Catch all exceptions
	catch (...)
	{
		// GNU-specific: report the dynamic type of the unknown exception
		std::exception_ptr p = std::current_exception();
		string name = (p ? p.__cxa_exception_type()->name() : "null");
		cerr << "Generic Exception in " << argv[0] << " : " << name << endl;
		exit(2);
	}

	// Return success
	exit(exitCode);
}

/**
 * Apply load filter
 *
 * Just call "ingest" method of the first one
 *
 * @param loadData	pointer to SendingProcess instance
 * @param readingSet	The current reading set loaded from storage
 */
void applyFilters(SendingProcess* loadData, ReadingSet* readingSet)
{
	// Get first filter
	PipelineElement *firstFilter = loadData->filterPipeline->getFirstFilterPlugin();

	// Call first filter "ingest"
	// Note:
	// next filters will be automatically called
	if (firstFilter)
	{
		firstFilter->ingest(readingSet);
	}
}

/**
 * Thread to load data from the storage layer.
 *
 * @param loadData	pointer to SendingProcess instance
 */
static void loadDataThread(SendingProcess *loadData)
{
	int sleep_num_increments, sleep_time;
	unsigned int readIdx = 0;

	sleep_num_increments = 0;
	sleep_time = TASK_FETCH_SLEEP;

	// Read from the storage last Id already sent
	loadData->setLastFetchId(loadData->getLastSentId());

	while (loadData->isRunning())
	{
		// Wrap around to the start of the circular buffer
		if (readIdx >= memoryBufferSize)
		{
			readIdx = 0;
		}

		/**
		 * Check whether m_buffer[readIdx] is NULL or contains a ReadingSet
		 *
		 * Access is protected by a mutex.
*/ readMutex.lock(); ReadingSet *canLoad = loadData->m_buffer.at(readIdx); readMutex.unlock(); if (canLoad) { #if VERBOSE_LOG Logger::getLogger()->info("SendingProcess loadDataThread: " "('%s' stream id %d), readIdx %u, buffer is NOT empty, waiting ...", loadData->getDataSourceType().c_str(), loadData->getStreamId(), readIdx); #endif Logger::getLogger()->info("SendingProcess is faster to load data than the destination to process them," " so all the %lu in memory buffers are full and the load thread should wait until at least a buffer is freed.", loadData->getMemoryBufferSize()); if (loadData->isRunning()) { // Load thread is put on hold, only if the execution should proceed unique_lock<mutex> lock(waitMutex); cond_var.wait(lock); } } else { // Load data from storage client (id >= lastId and getReadBlockSize() rows) ReadingSet* readings = NULL; try { string source = loadData->getDataSourceType(); //high_resolution_clock::time_point t1 = high_resolution_clock::now(); if (source.compare("readings") == 0) { // Read from storage all readings with id > last sent id unsigned long lastReadId = loadData->getLastFetchId() + 1; readings = loadData->getStorageClient()->readingFetch(lastReadId, loadData->getReadBlockSize()); } else if (source.compare("statistics") == 0) { // SELECT id, // key AS asset_code, // ts, // history_ts AS user_ts, // value // FROM statistic_history // WHERE id > lastId // ORDER BY ID ASC // LIMIT blockSize const Condition conditionId(GreaterThan); // WHERE id > lastId Where* wId = new Where("id", conditionId, to_string(loadData->getLastFetchId())); vector<Returns *> columns; // Add colums and needed aliases columns.push_back(new Returns("id")); columns.push_back(new Returns("key", "asset_code")); columns.push_back(new Returns("ts")); Returns *tmpReturn = new Returns("history_ts", "user_ts"); tmpReturn->timezone("utc"); columns.push_back(tmpReturn); columns.push_back(new Returns("value")); // Build the query with fields, aliases and where Query 
qStatistics(columns, wId); // Set limit qStatistics.limit(loadData->getReadBlockSize()); // Set sort Sort* sort = new Sort("id"); qStatistics.sort(sort); // Query the statistics_history table and get a ReadingSet result readings = loadData->getStorageClient()->queryTableToReadings("statistics_history", qStatistics); } else if (source.compare("audit") == 0) { const Condition conditionId(GreaterThan); // WHERE id > lastId Where* wId = new Where("id", conditionId, to_string(loadData->getLastFetchId())); vector<Returns *> columns; // Add colums and needed aliases columns.push_back(new Returns("id")); columns.push_back(new Returns("code", "asset_code")); columns.push_back(new Returns("ts")); Returns *tmpReturn = new Returns("ts", "user_ts"); tmpReturn->timezone("utc"); columns.push_back(tmpReturn); columns.push_back(new Returns("log", "reading")); // Build the query with fields, aliases and where Query qLog(columns, wId); // Set limit qLog.limit(loadData->getReadBlockSize()); // Set sort Sort* sort = new Sort("id"); qLog.sort(sort); // Query the log table and get a ReadingSet result readings = loadData->getStorageClient()->queryTableToReadings("log", qLog); } else { Logger::getLogger()->error("Unsupported source '%s' for north task.", source.c_str()); } //high_resolution_clock::time_point t2 = high_resolution_clock::now(); //auto duration = duration_cast<microseconds>( t2 - t1 ).count(); } catch (ReadingSetException* e) { Logger::getLogger()->error("SendingProcess loadData(): ReadingSet Exception '%s'", e->what()); } catch (std::exception& e) { Logger::getLogger()->error("SendingProcess loadData(): Generic Exception: '%s'", e.what()); } // Data fetched from storage layer if (readings != NULL && readings->getCount()) { sleep_time = TASK_FETCH_SLEEP; sleep_num_increments = 0; //Update last fetched reading Id loadData->setLastFetchId(readings->getLastId()); /** * Set last fetched reading Id for buffer index * This is used by send thread whiule updating the next * position 
to read from db. * NOTE: * The saved position is not ffected by the filters * called below which can skip some or all input readings. */ loadData->m_last_read_id.at(readIdx) = readings->getLastId(); /** * The buffer access is protected by a mutex */ readMutex.lock(); /** * Set now the buffer at index to ReadingSet pointer * Note: the ReadingSet pointer will be deleted by * - the sending thread when processin it * OR * at program exit by a cleanup routine * * Note: the readings set can be optionally filtered * if plugin filters are set. */ // Apply filters to the reading set if (loadData->filterPipeline) { PipelineElement *firstFilter = loadData->filterPipeline->getFirstFilterPlugin(); if (firstFilter) { // Check whether filters are set before calling ingest while (!loadData->filterPipeline->isReady()) { Logger::getLogger()->warn("Load data thread called before " "filter pipeline is ready"); std::this_thread::sleep_for(std::chrono::milliseconds(150)); } // Make the load readIdx available to filters loadData->setLoadBufferIndex(readIdx); // Apply filters applyFilters(loadData, readings); } else { // No filters: just set buffer with current data loadData->m_buffer.at(readIdx) = readings; } } else { // No filters: just set buffer with current data loadData->m_buffer.at(readIdx) = readings; } readMutex.unlock(); readIdx++; // Unlock the sendData thread unique_lock<mutex> lock(waitMutex); cond_var.notify_one(); } else { // Free empty result set if (readings) { delete readings; } // Error or no data read: just wait // TODO: add increments from 1 to TASK_SLEEP_MAX_INCREMENTS sleep_num_increments += 1; sleep_time *= 2; if (sleep_num_increments >= TASK_SLEEP_MAX_INCREMENTS) { sleep_time = TASK_FETCH_SLEEP; sleep_num_increments = 0; } this_thread::sleep_for(chrono::milliseconds(sleep_time)); } } } #if VERBOSE_LOG Logger::getLogger()->info("SendingProcess loadData thread: Last ID '%s' read is %lu", loadData->getDataSourceType().c_str(), loadData->getLastFetchId()); #endif /** 
* The loop is over: unlock the sendData thread */ unique_lock<mutex> lock(waitMutex); cond_var.notify_one(); } /** * Thread to send data to historian service * * @param loadData pointer to SendingProcess instance */ static void sendDataThread(SendingProcess *sendData) { unsigned long totSent = 0; unsigned int sendIdx = 0; bool slept; long sleep_time = TASK_SEND_SLEEP; int sleep_num_increments = 0; while (sendData->isRunning()) { slept = false; if (sendIdx >= memoryBufferSize) { if (sendData->getUpdateDb()) { // Update counters to Database sendData->updateDatabaseCounters(); // Reset current sent readings sendData->resetSentReadings(); // DB update done sendData->setUpdateDb(false); } // Reset send index sendIdx = 0; } /* * Check whether m_buffer[sendIdx] is NULL or contains ReadinSet data. * Access is protected by a mutex. */ readMutex.lock(); ReadingSet *canSend = sendData->m_buffer.at(sendIdx); readMutex.unlock(); if (canSend == NULL) { #if VERBOSE_LOG Logger::getLogger()->info("SendingProcess sendDataThread: " \ "('%s' stream id %d), sendIdx %u, buffer is empty, waiting ...", sendData->getDataSourceType().c_str(), sendData->getStreamId(), sendIdx); #endif if (sendData->getUpdateDb()) { // Update counters to Database sendData->updateDatabaseCounters(); // Reset current sent readings sendData->resetSentReadings(); // DB update done sendData->setUpdateDb(false); } if (sendData->isRunning()) { // Send thread is put on hold, only if the execution shoule proceed unique_lock<mutex> lock(waitMutex); cond_var.wait(lock); } } else { /** * Send the buffer content ( const vector<Readings *>& ) * to historian server via m_plugin->send(data). * Readings data by getAllReadings() will be * transformed using historian protocol and then sent to destination. 
*/ bool emptyReadings = sendData->m_buffer[sendIdx]->getCount() == 0; uint32_t sentReadings = 0; bool processUpdate = false; if (!emptyReadings) { // We have some readings to send const vector<Reading *> &readingData = sendData->m_buffer.at(sendIdx)->getAllReadings(); if (readingData.size() <= sendData->getReadBlockSize()) { sentReadings = sendData->m_plugin->send(readingData); } else { Logger::getLogger()->debug("Breaking up incomming readings block"); // Filtering has made the readings too long, split into smaller // vectors for sending unsigned int bs = (unsigned int)sendData->getReadBlockSize(); vector<Reading *>v; for (unsigned int i = 0; i < readingData.size(); i++) { v.push_back(readingData[i]); if (i > 0 && (i % bs) == 0) { sentReadings += sendData->m_plugin->send(v); v.clear(); } } if (v.size() > 0) // Flush final partial block { sentReadings += sendData->m_plugin->send(v); v.clear(); } } // Check sent readings result if (sentReadings) { processUpdate = true; exitCode = 0; // Update asset tracker table/cache, if required vector<Reading *> *vec = sendData->m_buffer.at(sendIdx)->getAllReadingsPtr(); for (vector<Reading *>::iterator it = vec->begin(); it != vec->end(); ++it) { Reading *reading = *it; AssetTrackingTuple tuple(sendData->getName(), sendData->getPluginName(), reading->getAssetName(), "Egress"); if (!AssetTracker::getAssetTracker()->checkAssetTrackingCache(tuple)) { AssetTracker::getAssetTracker()->addAssetTrackingTuple(tuple); Logger::getLogger()->info("sendDataThread: Adding new asset tracking tuple - egress: %s", tuple.assetToString().c_str()); } } } } else { exitCode = 0; // We have an empty readings set: check last id if (sendData->m_last_read_id.at(sendIdx) > 0) { processUpdate = true; } } if (processUpdate) { exitCode = 0; /** Sending done */ sendData->setUpdateDb(true); /** * 1- emptying data in m_buffer[sendIdx]. * The buffer access is protected by a mutex. 
*/ readMutex.lock(); // Update last sent reading Id using the last id of the unfiltered readings buffer sendData->setLastSentId(sendData->m_last_read_id.at(sendIdx)); // Free buffer delete sendData->m_buffer.at(sendIdx); sendData->m_buffer.at(sendIdx) = NULL; // Reset buffer last id sendData->m_last_read_id.at(sendIdx) = 0; /** 2- Update sent counter (memory only) */ sendData->updateSentReadings(sentReadings); // numReadings sent so far totSent += sentReadings; readMutex.unlock(); sendIdx++; // Unlock the loadData thread unique_lock<mutex> lock(waitMutex); cond_var.notify_one(); } else { Logger::getLogger()->error("SendingProcess sendDataThread: Error while sending " \ "('%s' stream id %d), sendIdx %u, N. (%d readings), " \ ", last reading id in buffer %ld", sendData->getDataSourceType().c_str(), sendData->getStreamId(), sendIdx, sendData->m_buffer[sendIdx]->getCount(), sendData->m_last_read_id.at(sendIdx)); if (sendData->getUpdateDb()) { // Update counters to Database sendData->updateDatabaseCounters(); // Reset current sent readings sendData->resetSentReadings(); // DB update done sendData->setUpdateDb(false); } // Error: just wait & continue this_thread::sleep_for(chrono::milliseconds(sleep_time)); slept = true; } } // Handles the sleep time, it is doubled every time up to a limit if (slept) { sleep_num_increments += 1; sleep_time *= 2; if (sleep_num_increments >= TASK_SLEEP_MAX_INCREMENTS) { sleep_time = TASK_SEND_SLEEP; sleep_num_increments = 0; } } } #if VERBOSE_LOG Logger::getLogger()->info("SendingProcess sendData thread: sent %lu total '%s'", totSent, sendData->getDataSourceType().c_str()); #endif if (sendData->getUpdateDb()) { // Update counters to Database sendData->updateDatabaseCounters(); // Reset current sent readings sendData->resetSentReadings(); sendData->setUpdateDb(false); } /** * The loop is over: unlock the loadData thread */ unique_lock<mutex> lock(waitMutex); cond_var.notify_one(); } ================================================ FILE: 
C/tasks/purge_system/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (purge_system) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(COMMON_LIB common-lib) set(PLUGINS_COMMON_LIB plugins-common-lib) find_package(Threads REQUIRED) set(BOOST_COMPONENTS system thread) find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) include_directories(.) include_directories(include) include_directories(../../thirdparty/Simple-Web-Server) include_directories(../../thirdparty/rapidjson/include) include_directories(../../common/include) file(GLOB purge_system_src "*.cpp") link_directories(${PROJECT_BINARY_DIR}/../../lib) add_executable(${PROJECT_NAME} ${purge_system_src} ${common_src}) target_link_libraries(${PROJECT_NAME} ${Boost_LIBRARIES}) target_link_libraries(${PROJECT_NAME} ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) target_link_libraries(${PROJECT_NAME} ${PLUGINS_COMMON_LIB}) install(TARGETS purge_system RUNTIME DESTINATION fledge/tasks) if(MSYS) #TODO: Is MSYS true when MSVC is true? target_link_libraries(purge_system ws2_32 wsock32) if(OPENSSL_FOUND) target_link_libraries(purge_system ws2_32 wsock32) endif() endif() ================================================ FILE: C/tasks/purge_system/include/purge_system.h ================================================ #ifndef _PURGE_SYSTEM_H #define _PURGE_SYSTEM_H /* * Fledge Statistics History * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ #include <process.h> #define TO_STRING(...) DEFER(TO_STRING_)(__VA_ARGS__) #define DEFER(x) x #define TO_STRING_(...) #__VA_ARGS__ #define QUOTE(...) 
TO_STRING(__VA_ARGS__) #define LOG_NAME "purge_system" #define CONFIG_CATEGORY_DESCRIPTION "Configuration of the Purge System" #define CONFIG_CATEGORY_DISPLAY_NAME "Purge System" #define UTILITIES_CATEGORY "Utilities" class PurgeSystem : public FledgeProcess { public: PurgeSystem(int argc, char** argv); ~PurgeSystem(); void run(); private: Logger *m_logger; StorageClient *m_storage; unsigned long m_retainStatsHistory; unsigned long m_retainAuditLog; unsigned long m_retainTaskHistory; private: void raiseError(const char *reason, ...); void purgeExecution(); void purgeTable(const std::string& tableName, const std::string& fieldName, unsigned long retentionDays); void historicizeData(unsigned long retentionDays); ResultSet *extractData(const std::string& tableName, const std::string& fieldName, unsigned long retentionDays); void storeData(const std::string& tableDest, ResultSet *data); void processEnd() const; ConfigCategory configurationHandling(const std::string& config); }; #endif ================================================ FILE: C/tasks/purge_system/main.cpp ================================================ /* * Fledge statistics history task * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ #include <purge_system.h> #include <logger.h> using namespace std; int main(int argc, char** argv) { try { PurgeSystem PurgeSystem(argc, argv); PurgeSystem.run(); } catch (const std::exception& e) { Logger::getLogger()->error("An error occurred during the execution, :%s: ", e.what()); exit(1); } catch (...) { std::exception_ptr p = std::current_exception(); string name = (p ? 
p.__cxa_exception_type()->name() : "null"); Logger::getLogger()->error("An error occurred during the execution, :%s: ", name.c_str() ); exit(1); } // Return success exit(0); } ================================================ FILE: C/tasks/purge_system/purge_system.cpp ================================================ /* * Fledge Purge System - purge tables in the fledge database * * Copyright (c) 2021 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Stefano Simonelli */ #include <purge_system.h> #include <logger.h> #include <cstdarg> /* va_list, va_start, va_arg, va_end */ #include <cstdlib> #include <thread> #include <csignal> using namespace std; volatile std::sig_atomic_t signalReceived = 0; static const string DEFAULT_CONFIG = QUOTE({ "retainStatsHistory": { "description": "The number of days for which full granularity statistics history is maintained.", "type": "integer", "default": "7", "displayName": "Statistics Retention", "order": "1", "minimum": "1" }, "retainAuditLog" : { "description": "The number of days for which audit trail data is retained", "type": "integer", "default": "30", "displayName": "Audit Retention", "order": "2", "minimum": "1" }, "retainTaskHistory" : { "description": "The number of days for which task history is retained", "type": "integer", "default": "30", "displayName": "Task Retention", "order": "3", "minimum": "1" } }); static void signalHandler(int signal) { signalReceived = signal; } /** * Error handler - logs the error and raises the exception */ void PurgeSystem::raiseError(const char *reason, ...) 
{ //By default Syslog is limited to a message size of 1024 bytes char buffer[1024]; va_list ap; va_start(ap, reason); vsnprintf(buffer, sizeof(buffer), reason, ap); va_end(ap); m_logger->error("PurgeSystem raising error: %s", buffer); throw runtime_error(buffer); } /** * Constructor for Purge system */ PurgeSystem::PurgeSystem(int argc, char** argv) : FledgeProcess(argc, argv) { string paramName; paramName = getName(); m_logger = Logger::getLogger(); // Logger is created by FledgeProcess m_logger->info("PurgeSystem starting - parameters name :%s:", paramName.c_str() ); m_retainStatsHistory = 0; m_retainAuditLog = 0; m_retainTaskHistory = 0; m_storage = this->getStorageClient(); } /** * */ PurgeSystem::~PurgeSystem() { } /** * PurgeSystem run method, called by the base class * to start the process and do the actual work. */ void PurgeSystem::run() { // We handle these signals, add more if needed std::signal(SIGINT, signalHandler); std::signal(SIGSTOP, signalHandler); std::signal(SIGTERM, signalHandler); ConfigCategory configuration = configurationHandling(DEFAULT_CONFIG); try { m_retainStatsHistory = strtoul(configuration.getValue("retainStatsHistory").c_str(), nullptr, 10); m_retainAuditLog = strtoul(configuration.getValue("retainAuditLog").c_str(), nullptr, 10); m_retainTaskHistory = strtoul(configuration.getValue("retainTaskHistory").c_str(), nullptr, 10); } catch (const std::exception &e) { raiseError ("impossible to retrieve the configuration :%s:", e.what() ); } m_logger->info("configuration retainStatsHistory :%d: retainAuditLog :%d: retainTaskHistory :%d:" ,m_retainStatsHistory ,m_retainAuditLog ,m_retainTaskHistory); if (!m_dryRun) { purgeExecution(); } processEnd(); } /** * Retrieves and store the configuration * * @param config Default configuration */ ConfigCategory PurgeSystem::configurationHandling(const std::string& config) { // retrieves the configuration using the value of the --name parameter // (received in the command line) as the key string 
categoryName(this->getName()); ConfigCategory configuration; ManagementClient *client = this->getManagementClient(); m_logger->debug("%s - categoryName :%s:", __FUNCTION__, categoryName.c_str()); // Create category, with "default" values only DefaultConfigCategory defaultConfig(categoryName, config); defaultConfig.setDescription(CONFIG_CATEGORY_DESCRIPTION); defaultConfig.setDisplayName(CONFIG_CATEGORY_DISPLAY_NAME); // Create/Update category name (we pass keep_original_items=true) if (! client->addCategory(defaultConfig, true)) { raiseError ("Failure creating/updating configuration key :%s: ", categoryName.c_str() ); } // Purge system category as child of Utilities { vector<string> children; children.push_back(categoryName); ConfigCategories categories = client->getCategories(); try { bool found = false; for (unsigned int idx = 0; idx < categories.length(); idx++) { if (categories[idx]->getName().compare(UTILITIES_CATEGORY) == 0) { client->addChildCategories(UTILITIES_CATEGORY, children); found = true; } } if (!found) { raiseError("adding %s as a child of %s", categoryName.c_str(), UTILITIES_CATEGORY); } } catch (...) { std::exception_ptr p = std::current_exception(); string errorInfo = (p ? 
p.__cxa_exception_type()->name() : "null"); raiseError("adding %s as a child of %s - %s", categoryName.c_str(), UTILITIES_CATEGORY, errorInfo.c_str()); } } // Get the category with values and defaults configuration = client->getCategory(categoryName); return ConfigCategory(configuration); } /** * Execute the purge, store information in an historicization table and delete teh information * the tables currently handled are: * * - fledge.statistics_history * - fledge.tasks * - fledge.log */ void PurgeSystem::purgeExecution() { string tableName; m_logger->info("PurgeSystem running"); tableName = "statistics_history"; try { historicizeData(m_retainStatsHistory); purgeTable(tableName, "history_ts", m_retainStatsHistory); } catch (const std::exception &e) { raiseError ("Failure historicizing and purging table :%s: :%s:",tableName.c_str(), e.what() ); } purgeTable("tasks", "start_time", m_retainTaskHistory); purgeTable("log", "ts", m_retainAuditLog); } /** * Store statistics_history details information in a historicization table * * @param retentionDays Number of days to retain */ void PurgeSystem::historicizeData(unsigned long retentionDays) { string tableSource; string fieldName; string tableDest; ResultSet *data; tableSource="statistics_history"; fieldName="history_ts"; tableDest="statistics_history_daily"; m_logger->debug("%s - historicizing :%s: retention days :%d: ", __FUNCTION__, tableSource.c_str(), retentionDays); data = extractData(tableSource, fieldName, retentionDays); if (data->rowCount()) { try { storeData(tableDest, data); } catch (const std::exception &e) { ; } } delete data; } /** * Retrieve grouped information to historicize * * @param tableName Name of the table from which the records should be extracted * @param fieldName Timestamp on which the where condition should be based on * @param retentionDays Number of days to retain * * @return Retrieved recordset */ ResultSet *PurgeSystem::extractData(const std::string& tableName, const std::string& 
fieldName, unsigned long retentionDays) { ResultSet *data; string conditionValue; data = nullptr; const Condition conditionExpr(Older); conditionValue = to_string (retentionDays * 60 * 60 * 24); // the days should be expressed in seconds //conditionValue = to_string (retentionDays); Where *_where = new Where(fieldName, conditionExpr, conditionValue); Query _query(_where); // Alias handling is ignored because of the presence of the group by // vector<Returns *> _returns {}; // _returns.push_back(new Returns("date(history_ts)", "date") ); // _returns.push_back(new Returns("key") ); // _query.returns(_returns); Aggregate *_aggregate = new Aggregate("sum", "value"); _query.aggregate(_aggregate); _query.group("date(history_ts), key"); try { data = m_storage->queryTable(tableName, _query); if (data == nullptr) { raiseError ("Failure extracting data from the table :%s: ", tableName.c_str() ); } } catch (const std::exception &e) { raiseError ("Failure extracting data :%s:", e.what() ); } m_logger->debug("%s - %s rows extracted :%d:", __FUNCTION__, tableName.c_str(), data->rowCount() ); return (data); } /** * Store the content of the provided recordset in the given table * * @param tableDest Name of the table in which the recordset should be stored * @param data recordset to store on the table tableDest */ void PurgeSystem::storeData(const std::string& tableDest, ResultSet *data) { long fieldYear; string fieldDate; string fieldKey; long fieldValue = 0; int affected = 0; bool retrieved; try { m_logger->debug("%s - storing in :%s: rows :%d:", __FUNCTION__, tableDest.c_str(), data->rowCount() ); ResultSet::RowIterator item = data->firstRow(); do { ResultSet::Row* row = *item; if (row) { // SQLite and PostgreSQL plugins behave differently, it initially tries the code for SQLite and in case // of an error it executes the PostgreSQL one try { fieldDate = row->getColumn("date(history_ts)")->getString(); retrieved = true; } catch (...) { retrieved = false; } if (! 
retrieved) { fieldDate = row->getColumn("date")->getString(); } fieldYear = strtol(fieldDate.substr(0, 4).c_str(), nullptr, 10); fieldKey = row->getColumn("key")->getString(); // SQLite and PostgreSQL plugins behave differently, it initially tries the code for SQLite and in case // of an error it executes the PostgreSQL one try { fieldValue = row->getColumn("sum_value")->getInteger(); retrieved = true; } catch (...) { retrieved = false; } if (! retrieved) { fieldValue = strtol(row->getColumn("sum_value")->getString(), nullptr, 10); } InsertValues values; values.push_back(InsertValue("year", fieldYear) ); values.push_back(InsertValue("day", fieldDate) ); values.push_back(InsertValue("key", fieldKey) ); values.push_back(InsertValue("value", fieldValue) ); m_logger->debug("%s - :%s: inserting :%ld: :%s: :%s: :%ld: ", __FUNCTION__, tableDest.c_str() , fieldYear , fieldDate.c_str() , fieldKey.c_str() , fieldValue); affected = m_storage->insertTable(tableDest, values); if (affected == -1) { raiseError ("Failure inserting rows into :%s: ", tableDest.c_str() ); } } } while (!data->isLastRow(item++)); } catch (const std::exception &e) { raiseError ("Failure inserting rows into :%s: error :%s: ", tableDest.c_str(), e.what() ); } } /** * Purge the content of the given table from the information older than a provided number of days * * @param tableName Name of the table to purge * @param fieldName Timestamp on which the where condition should be based on * @param retentionDays Number of days to retain */ void PurgeSystem::purgeTable(const std::string& tableName, const std::string& fieldName, unsigned long retentionDays) { int affected; string conditionValue; affected = 0; const Condition conditionExpr(Older); conditionValue = to_string (retentionDays * 60 * 60 * 24); // the days should be expressed in seconds //conditionValue = to_string (retentionDays); m_logger->debug("%s - purging :%s: retention days :%d: conditionValue :%s:", __FUNCTION__, tableName.c_str(), retentionDays, 
conditionValue.c_str() ); Where *_where = new Where(fieldName, conditionExpr, conditionValue); Query _query(_where); try { affected = m_storage->deleteTable(tableName, _query); if (affected == -1) { raiseError ("Failure purging the table :%s: ", tableName.c_str() ); } } catch (const std::exception &e) { raiseError ("Failure purging the table :%s: ", tableName.c_str() ); } m_logger->debug("%s - %s rows purged :%d:", __FUNCTION__, tableName.c_str(), affected); } /** * Terminate the operation * */ void PurgeSystem::processEnd() const { m_logger->info("PurgeSystem completed"); } ================================================ FILE: C/tasks/statistics_history/CMakeLists.txt ================================================ cmake_minimum_required (VERSION 2.8.8) project (statistics_history) set(CMAKE_CXX_FLAGS_DEBUG "-O0 -ggdb") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wextra -Wsign-conversion") set(UUIDLIB -luuid) set(COMMON_LIB common-lib services-common-lib) include_directories(. include ../../thirdparty/Simple-Web-Server ../../thirdparty/rapidjson/include ../../common/include) find_package(Threads REQUIRED) set(BOOST_COMPONENTS system thread) find_package(Boost 1.53.0 COMPONENTS ${BOOST_COMPONENTS} REQUIRED) include_directories(SYSTEM ${Boost_INCLUDE_DIR}) file(GLOB statistics_history_src "*.cpp") link_directories(${PROJECT_BINARY_DIR}/../../lib) add_executable(statistics_history ${statistics_history_src} ${common_src}) target_link_libraries(statistics_history ${Boost_LIBRARIES}) target_link_libraries(statistics_history ${CMAKE_THREAD_LIBS_INIT}) target_link_libraries(statistics_history ${UUIDLIB}) target_link_libraries(${PROJECT_NAME} ${COMMON_LIB}) install(TARGETS statistics_history RUNTIME DESTINATION fledge/tasks) if(MSYS) #TODO: Is MSYS true when MSVC is true? 
target_link_libraries(statistics_history ws2_32 wsock32) if(OPENSSL_FOUND) target_link_libraries(statistics_history ws2_32 wsock32) endif() endif() ================================================ FILE: C/tasks/statistics_history/include/stats_history.h ================================================ #ifndef _STATISTICS_HISTORY_H #define _STATISTICS_HISTORY_H /* * Fledge Statistics History * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <process.h> #include <vector> #include <string> #include <utility> /** * StatisticsHisotry class */ class StatsHistory : public FledgeProcess { public: // Constructor: StatsHistory(int argc, char** argv); // Destructor ~StatsHistory(); void run() const; private: void processKey(const std::string& key, std::vector<InsertValues> &historyValues, std::vector<std::pair<InsertValue *, Where *> > &updateValues, std::string dateTimeStr, int val , int prev) const; std::string getTime(void) const; }; #endif ================================================ FILE: C/tasks/statistics_history/main.cpp ================================================ /* * Fledge statistics history task * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <stats_history.h> using namespace std; int main(int argc, char** argv) { try { // Instantiate StatsHistory class StatsHistory statisticsHistory(argc, argv); statisticsHistory.run(); } catch (const std::exception& e) { Logger::getLogger()->fatal("Exception %s starting Stats History task", e.what()); // Return failure for class instance/configuration etc exit(1); } // Catch all exceptions catch (...) { std::exception_ptr p = std::current_exception(); string name = (p ? 
p.__cxa_exception_type()->name() : "null"); Logger::getLogger()->fatal("Exception %s starting Stats History task", name); exit(1); } // Return success exit(0); } ================================================ FILE: C/tasks/statistics_history/stats_history.cpp ================================================ /* * Fledge Statistics History * * Copyright (c) 2018 Dianomic Systems * * Released under the Apache 2.0 Licence * * Author: Mark Riddoch */ #include <stats_history.h> #include <csignal> #include <time.h> #include <sys/time.h> #include "string_utils.h" #define DATETIME_MAX_LEN 52 #define MICROSECONDS_FORMAT_LEN 10 #define DATETIME_FORMAT_DEFAULT "%Y-%m-%d %H:%M:%S" using namespace std; volatile std::sig_atomic_t signalReceived = 0; /** * Handle Signals */ static void signalHandler(int signal) { signalReceived = signal; } /** * Constructor for Statistics history task */ StatsHistory::StatsHistory(int argc, char** argv) : FledgeProcess(argc, argv) { Logger::getLogger()->info("StatsHistory starting"); } /** * StatsHistory class methods */ StatsHistory::~StatsHistory() { } /** * Statisitics History run method, called by the base class * to start the process and do the actual work. 
*/ void StatsHistory::run() const { // We handle these signals, add more if needed std::signal(SIGINT, signalHandler); std::signal(SIGSTOP, signalHandler); std::signal(SIGTERM, signalHandler); if (m_dryRun) return; // Get the set of distinct statistics keys Query query(new Returns("key")); query.distinct(); query.returns(new Returns("value")); query.returns(new Returns("previous_value")); ResultSet *keySet = getStorageClient()->queryTable("statistics", query); ResultSet::RowIterator rowIter = keySet->firstRow(); std::vector<InsertValues> historyValues; vector<pair<InsertValue *, Where *>> updateValues; std::string dateTimeStr = getTime(); while (keySet->hasNextRow(rowIter) || keySet->isLastRow(rowIter) ) { string key = (*rowIter)->getColumn("key")->getString(); int val = (*rowIter)->getColumn("value")->getInteger(); int prev = (*rowIter)->getColumn("previous_value")->getInteger(); try { processKey(key, historyValues, updateValues, dateTimeStr, val, prev); } catch (exception& e) { getLogger()->error("Failed to process statisitics key %s, %s", key, e.what()); } if (!keySet->isLastRow(rowIter)) rowIter = keySet->nextRow(rowIter); else break; } int n_rows; if ((n_rows = getStorageClient()->insertTable("statistics_history", historyValues)) < 1) { getLogger()->error("Failed to insert rows to statistics history table "); } if (getStorageClient()->updateTable("statistics", updateValues) < 1) { getLogger()->error("Failed to update rows to statistics table"); } for (auto it = updateValues.begin(); it != updateValues.end() ; ++it) { InsertValue *updateValue = it->first; if (updateValue) { delete updateValue; updateValue=nullptr; } Where *wKey = it->second; if(wKey) { delete wKey; wKey = nullptr; } } delete keySet; } /** * Process statistics keys * * @param key The statistics key to process * @param historyValues Values to be inserted in statistics_history * @param updateValues Values to be updated in statistics * @param dateTimeStr Local time with microseconds precision * 
@param val int * @param prev int * @return void */ void StatsHistory::processKey(const std::string& key, std::vector<InsertValues> &historyValues, std::vector<std::pair<InsertValue*, Where *> > &updateValues, std::string dateTimeStr, int val, int prev) const { InsertValues iValue; // Insert the row into the statistics history // create an object of InsertValues and push in historyValues vector // for batch insertion string escaped_key = escape(key); iValue.push_back(InsertValue("key", escaped_key)); iValue.push_back(InsertValue("value", val - prev)); iValue.push_back(InsertValue("history_ts", dateTimeStr)); historyValues.push_back(iValue); // Update the previous value in the statistics row // create an object of InsertValue and push in updateValues vector // for batch updation InsertValue *updateValue = new InsertValue("previous_value", val); Where *wKey = new Where("key", Equals, escaped_key); updateValues.emplace_back(updateValue, wKey); } /** * getTime() function returns the localTime with microseconds precision * * @param void * @return std::string localTime */ std::string StatsHistory::getTime(void) const { struct timeval tv ; struct tm* timeinfo; gettimeofday(&tv, NULL); timeinfo = gmtime(&tv.tv_sec); char date_time[DATETIME_MAX_LEN]; // Create datetime with seconds strftime(date_time, sizeof(date_time), DATETIME_FORMAT_DEFAULT, timeinfo); std::string dateTimeLocal = date_time; char micro_s[MICROSECONDS_FORMAT_LEN]; // Add microseconds snprintf(micro_s, sizeof(micro_s), ".%06lu", tv.tv_usec); dateTimeLocal.append(micro_s); return dateTimeLocal; } ================================================ FILE: C/thirdparty/Simple-Web-Server/CMakeLists.txt ================================================ cmake_minimum_required(VERSION 3.0) project(Simple-Web-Server) option(USE_STANDALONE_ASIO "set ON to use standalone Asio instead of Boost.Asio" OFF) if(CMAKE_SOURCE_DIR STREQUAL "${CMAKE_CURRENT_SOURCE_DIR}") option(BUILD_TESTING "set ON to build library tests" ON) 
else()
  # When included as a sub-project, do not build the tests by default
  option(BUILD_TESTING "set ON to build library tests" OFF)
endif()
option(BUILD_FUZZING "set ON to build library fuzzers" OFF)
option(USE_OPENSSL "set OFF to build without OpenSSL" ON)

# Header-only library: consumers only need the include path and link flags
add_library(simple-web-server INTERFACE)
target_include_directories(simple-web-server INTERFACE ${CMAKE_CURRENT_SOURCE_DIR})

find_package(Threads REQUIRED)
target_link_libraries(simple-web-server INTERFACE ${CMAKE_THREAD_LIBS_INIT})

# TODO 2020 when Debian Jessie LTS ends:
# Remove Boost system, thread, regex components; use Boost::<component> aliases; remove Boost target_include_directories
if(USE_STANDALONE_ASIO)
  target_compile_definitions(simple-web-server INTERFACE USE_STANDALONE_ASIO)
  find_path(ASIO_PATH asio.hpp)
  if(NOT ASIO_PATH)
    message(FATAL_ERROR "Standalone Asio not found")
  else()
    target_include_directories(simple-web-server INTERFACE ${ASIO_PATH})
  endif()
else()
  find_package(Boost 1.53.0 COMPONENTS system thread REQUIRED)
  target_link_libraries(simple-web-server INTERFACE ${Boost_LIBRARIES})
  target_include_directories(simple-web-server INTERFACE ${Boost_INCLUDE_DIR})
  # GCC < 4.9: switch from std::regex to Boost.Regex
  if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.9)
    target_compile_definitions(simple-web-server INTERFACE USE_BOOST_REGEX)
    find_package(Boost 1.53.0 COMPONENTS regex REQUIRED)
    target_link_libraries(simple-web-server INTERFACE ${Boost_LIBRARIES})
    target_include_directories(simple-web-server INTERFACE ${Boost_INCLUDE_DIR})
  endif()
endif()

if(WIN32)
  # Winsock libraries needed when building on Windows
  target_link_libraries(simple-web-server INTERFACE ws2_32 wsock32)
endif()

if(APPLE)
  # Homebrew OpenSSL location on macOS
  set(OPENSSL_ROOT_DIR "/usr/local/opt/openssl")
endif()
if(USE_OPENSSL)
  find_package(OpenSSL)
endif()
if(OPENSSL_FOUND)
  # HTTPS support is compiled in only when OpenSSL was found
  target_compile_definitions(simple-web-server INTERFACE HAVE_OPENSSL)
  target_link_libraries(simple-web-server INTERFACE ${OPENSSL_LIBRARIES})
  target_include_directories(simple-web-server INTERFACE ${OPENSSL_INCLUDE_DIR})
endif()

# If Simple-Web-Server is not a sub-project:
if(CMAKE_SOURCE_DIR STREQUAL
"${CMAKE_CURRENT_SOURCE_DIR}") if(NOT MSVC) add_compile_options(-std=c++11 -Wall -Wextra) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wthread-safety) endif() else() add_compile_options(/W1) endif() find_package(Boost 1.53.0 COMPONENTS system thread filesystem) if(Boost_FOUND) add_executable(http_examples http_examples.cpp) target_link_libraries(http_examples simple-web-server) target_link_libraries(http_examples ${Boost_LIBRARIES}) target_include_directories(http_examples PRIVATE ${Boost_INCLUDE_DIR}) if(OPENSSL_FOUND) add_executable(https_examples https_examples.cpp) target_link_libraries(https_examples simple-web-server) target_link_libraries(https_examples ${Boost_LIBRARIES}) target_include_directories(https_examples PRIVATE ${Boost_INCLUDE_DIR}) endif() endif() install(FILES asio_compatibility.hpp server_http.hpp client_http.hpp server_https.hpp client_https.hpp crypto.hpp utility.hpp status_code.hpp mutex.hpp DESTINATION include/simple-web-server) endif() if(BUILD_TESTING OR BUILD_FUZZING) if(BUILD_TESTING) enable_testing() endif() add_subdirectory(tests) endif() ================================================ FILE: C/thirdparty/Simple-Web-Server/LICENSE ================================================ The MIT License (MIT) Copyright (c) 2014-2020 Ole Christian Eidheim Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

================================================
FILE: C/thirdparty/Simple-Web-Server/README.md
================================================
# Simple-Web-Server

A very simple, fast, multithreaded, platform independent HTTP and HTTPS server and client library implemented using C++11 and Asio (both Boost.Asio and standalone Asio can be used). Created to be an easy way to make REST resources available from C++ applications.

See https://gitlab.com/eidheim/Simple-WebSocket-Server for an easy way to make WebSocket/WebSocket Secure endpoints in C++. Also, feel free to check out the new C++ IDE supporting C++11/14/17: https://gitlab.com/cppit/jucipp.

## Features

* Asynchronous request handling
* Thread pool if needed
* Platform independent
* HTTP/1.1 supported, including persistent connections
* HTTPS supported
* Chunked transfer encoding and server-sent events
* Can set timeouts for request/response and content
* Can set max request/response size
* Sending outgoing messages is thread safe
* Client creates necessary connections and performs reconnects when needed

See also [benchmarks](https://gitlab.com/eidheim/Simple-Web-Server/blob/master/docs/benchmarks.md) for performance comparisons to a few other HTTP libraries.

## Usage

See [http_examples.cpp](https://gitlab.com/eidheim/Simple-Web-Server/blob/master/http_examples.cpp) or [https_examples.cpp](https://gitlab.com/eidheim/Simple-Web-Server/blob/master/https_examples.cpp) for example usage.
The following server resources are set up using regular expressions to match request paths:
* `POST /string` - responds with the posted string.
* `POST /json` - parses the request content as JSON, and responds with some of the parsed values.
* `GET /info` - responds with information extracted from the request.
* `GET /match/([0-9]+)` - matches for instance `/match/123` and responds with the matched number `123`.
* `GET /work` - starts a thread, simulating heavy work, and responds when the work is done.
* `GET` - a special default_resource handler is called when a request path does not match any of the above resources. This resource responds with the content of files in the `web/`-folder if the request path identifies one of these files.

[Documentation](https://eidheim.gitlab.io/Simple-Web-Server/annotated.html) is also available, generated from the master branch.

## Dependencies

* Boost.Asio or standalone Asio
* Boost is required to compile the examples
* For HTTPS: OpenSSL libraries

Installation instructions for the dependencies needed to compile the examples on a selection of platforms can be seen below. Default build with Boost.Asio is assumed. Turn on CMake option `USE_STANDALONE_ASIO` to instead use standalone Asio.

### Debian based distributions

```sh
sudo apt-get install libssl-dev libboost-filesystem-dev libboost-thread-dev
```

### Arch Linux based distributions

```sh
sudo pacman -S boost
```

### MacOS

```sh
brew install openssl boost
```

## Compile and run

Compile with a C++11 compliant compiler:

```sh
cmake -H. -Bbuild
cmake --build build
```

### HTTP

Run the server and client examples: `./build/http_examples`

Direct your favorite browser to, for instance, http://localhost:8080/

### HTTPS

Before running the server, an RSA private key (server.key) and an SSL certificate (server.crt) must be created.
Run the server and client examples: `./build/https_examples`

Direct your favorite browser to for instance https://localhost:8080/

## Contributing

Contributions are welcome, either by creating an issue or a merge request. However, before you create a new issue or merge request, please search for previous similar issues or requests. A response will normally be given within a few days.

================================================
FILE: C/thirdparty/Simple-Web-Server/asio_compatibility.hpp
================================================
#ifndef SIMPLE_WEB_ASIO_COMPATIBILITY_HPP
#define SIMPLE_WEB_ASIO_COMPATIBILITY_HPP

#include <memory>

// Map the library's SimpleWeb names onto either standalone Asio or
// Boost.Asio, so the rest of the code can be written against one API.
#ifdef USE_STANDALONE_ASIO
#include <asio.hpp>
#include <asio/steady_timer.hpp>
namespace SimpleWeb {
  namespace error = asio::error;
  using error_code = std::error_code;
  using errc = std::errc;
  using system_error = std::system_error;
  namespace make_error_code = std;
} // namespace SimpleWeb
#else
#include <boost/asio.hpp>
#include <boost/asio/steady_timer.hpp>
namespace SimpleWeb {
  namespace asio = boost::asio;
  namespace error = asio::error;
  using error_code = boost::system::error_code;
  namespace errc = boost::system::errc;
  using system_error = boost::system::system_error;
  namespace make_error_code = boost::system::errc;
} // namespace SimpleWeb
#endif

namespace SimpleWeb {
// Shim layer: Asio/Boost.Asio 1.13 (version number 101300) renamed or
// replaced several types and free functions. The first branch targets
// the new API, the second the pre-1.13 io_service-based API; both
// expose the same set of SimpleWeb helpers.
#if(USE_STANDALONE_ASIO && ASIO_VERSION >= 101300) || BOOST_ASIO_VERSION >= 101300
  using io_context = asio::io_context;
  using resolver_results = asio::ip::tcp::resolver::results_type;
  using async_connect_endpoint = asio::ip::tcp::endpoint;

  // Post a handler for execution on the context's event loop
  template <typename handler_type>
  inline void post(io_context &context, handler_type &&handler) {
    asio::post(context, std::forward<handler_type>(handler));
  }
  // Prepare the context for a subsequent run() invocation
  inline void restart(io_context &context) noexcept {
    context.restart();
  }
  inline asio::ip::address make_address(const std::string &str) noexcept {
    return asio::ip::make_address(str);
  }
  // Create a steady timer on the same executor as the given socket
  template <typename socket_type, typename duration_type>
  std::unique_ptr<asio::steady_timer> make_steady_timer(socket_type &socket, std::chrono::duration<duration_type> duration) {
    return std::unique_ptr<asio::steady_timer>(new asio::steady_timer(socket.get_executor(), duration));
  }
  // Resolve host/port given as a pair of strings
  template <typename handler_type>
  void async_resolve(asio::ip::tcp::resolver &resolver, const std::pair<std::string, std::string> &host_port, handler_type &&handler) {
    resolver.async_resolve(host_port.first, host_port.second, std::forward<handler_type>(handler));
  }
  // Keep the context's run() from returning while there is no pending work
  inline asio::executor_work_guard<io_context::executor_type> make_work_guard(io_context &context) {
    return asio::make_work_guard(context);
  }
#else
  // Pre-1.13 API: io_service, iterator-based resolver results
  using io_context = asio::io_service;
  using resolver_results = asio::ip::tcp::resolver::iterator;
  using async_connect_endpoint = asio::ip::tcp::resolver::iterator;

  template <typename handler_type>
  inline void post(io_context &context, handler_type &&handler) {
    context.post(std::forward<handler_type>(handler));
  }
  inline void restart(io_context &context) noexcept {
    context.reset();
  }
  inline asio::ip::address make_address(const std::string &str) noexcept {
    return asio::ip::address::from_string(str);
  }
  template <typename socket_type, typename duration_type>
  std::unique_ptr<asio::steady_timer> make_steady_timer(socket_type &socket, std::chrono::duration<duration_type> duration) {
    return std::unique_ptr<asio::steady_timer>(new asio::steady_timer(socket.get_io_service(), duration));
  }
  template <typename handler_type>
  void async_resolve(asio::ip::tcp::resolver &resolver, const std::pair<std::string, std::string> &host_port, handler_type &&handler) {
    resolver.async_resolve(asio::ip::tcp::resolver::query(host_port.first, host_port.second), std::forward<handler_type>(handler));
  }
  inline io_context::work make_work_guard(io_context &context) {
    return io_context::work(context);
  }
#endif
} // namespace SimpleWeb

#endif /* SIMPLE_WEB_ASIO_COMPATIBILITY_HPP */

================================================
FILE:
C/thirdparty/Simple-Web-Server/client_http.hpp
================================================
#ifndef SIMPLE_WEB_CLIENT_HTTP_HPP
#define SIMPLE_WEB_CLIENT_HTTP_HPP

#include "asio_compatibility.hpp"
#include "mutex.hpp"
#include "utility.hpp"
#include <future>
#include <limits>
#include <random>
#include <unordered_set>
#include <vector>

namespace SimpleWeb {
  class HeaderEndMatch {
    // Progress (0-3) through the standard "\r\n\r\n" header terminator
    int crlfcrlf = 0;
    // Progress (0-1) through the non-standard "\n\n" terminator
    int lflf = 0;

  public:
    /// Match condition for asio::read_until to match both standard and non-standard HTTP header endings.
    std::pair<asio::buffers_iterator<asio::const_buffers_1>, bool> operator()(asio::buffers_iterator<asio::const_buffers_1> begin, asio::buffers_iterator<asio::const_buffers_1> end) {
      auto it = begin;
      for(; it != end; ++it) {
        if(*it == '\n') {
          if(crlfcrlf == 1) // seen "\r", now "\r\n"
            ++crlfcrlf;
          else if(crlfcrlf == 2) // "\r\n\n": not a CRLF CRLF ending, reset
            crlfcrlf = 0;
          else if(crlfcrlf == 3) // completed "\r\n\r\n": header end found
            return {++it, true};
          if(lflf == 0)
            ++lflf;
          else if(lflf == 1) // second consecutive bare "\n": header end found
            return {++it, true};
        }
        else if(*it == '\r') {
          if(crlfcrlf == 0)
            ++crlfcrlf;
          else if(crlfcrlf == 2) // seen "\r\n", now "\r\n\r"
            ++crlfcrlf;
          else
            crlfcrlf = 0;
          lflf = 0;
        }
        else {
          // Any other byte restarts both terminator state machines
          crlfcrlf = 0;
          lflf = 0;
        }
      }
      // No terminator found yet in this buffer; ask asio to read more data
      return {it, false};
    }
  };
} // namespace SimpleWeb
#ifndef USE_STANDALONE_ASIO
namespace boost {
#endif
  namespace asio {
    // Registers HeaderEndMatch as a MatchCondition for asio::read_until
    template <> struct is_match_condition<SimpleWeb::HeaderEndMatch> : public std::true_type {};
  } // namespace asio
#ifndef USE_STANDALONE_ASIO
} // namespace boost
#endif

namespace SimpleWeb {
  template <class socket_type> class Client;

  template <class socket_type> class ClientBase {
  public:
    /// std::istream view over response content held in an asio::streambuf.
    class Content : public std::istream {
      friend class ClientBase<socket_type>;

    public:
      std::size_t size() noexcept {
        return streambuf.size();
      }
      /// Convenience function to return content as a string.
      std::string string() noexcept {
        return std::string(asio::buffers_begin(streambuf.data()), asio::buffers_end(streambuf.data()));
      }

      /// When true, this is the last response content part from server for the current request.
bool end = true; private: asio::streambuf &streambuf; Content(asio::streambuf &streambuf) noexcept : std::istream(&streambuf), streambuf(streambuf) {} }; protected: class Connection; public: class Response { friend class ClientBase<socket_type>; friend class Client<socket_type>; class Shared { public: std::string http_version, status_code; CaseInsensitiveMultimap header; }; asio::streambuf streambuf; std::shared_ptr<Shared> shared; std::weak_ptr<Connection> connection_weak; Response(std::size_t max_response_streambuf_size, const std::shared_ptr<Connection> &connection_) noexcept : streambuf(max_response_streambuf_size), shared(new Shared()), connection_weak(connection_), http_version(shared->http_version), status_code(shared->status_code), header(shared->header), content(streambuf) {} /// Constructs a response object that has empty content, but otherwise is equal to the response parameter Response(const Response &response) noexcept : streambuf(response.streambuf.max_size()), shared(response.shared), connection_weak(response.connection_weak), http_version(shared->http_version), status_code(shared->status_code), header(shared->header), content(streambuf) {} public: std::string &http_version, &status_code; CaseInsensitiveMultimap &header; Content content; /// Closes the connection to the server, preventing further response content parts from server. void close() noexcept { if(auto connection = this->connection_weak.lock()) connection->close(); } }; class Config { friend class ClientBase<socket_type>; private: Config() noexcept {} public: /// Set timeout on requests in seconds. Default value: 0 (no timeout). long timeout = 0; /// Set connect timeout in seconds. Default value: 0 (Config::timeout is then used instead). long timeout_connect = 0; /// Maximum size of response stream buffer. Defaults to architecture maximum. /// Reaching this limit will result in a message_size error code. 
std::size_t max_response_streambuf_size = (std::numeric_limits<std::size_t>::max)(); /// Set proxy server (server:port) std::string proxy_server; }; protected: class Connection : public std::enable_shared_from_this<Connection> { public: template <typename... Args> Connection(std::shared_ptr<ScopeRunner> handler_runner_, Args &&... args) noexcept : handler_runner(std::move(handler_runner_)), socket(new socket_type(std::forward<Args>(args)...)) {} std::shared_ptr<ScopeRunner> handler_runner; std::unique_ptr<socket_type> socket; // Socket must be unique_ptr since asio::ssl::stream<asio::ip::tcp::socket> is not movable bool in_use = false; bool attempt_reconnect = true; std::unique_ptr<asio::steady_timer> timer; void close() noexcept { error_code ec; socket->lowest_layer().shutdown(asio::ip::tcp::socket::shutdown_both, ec); socket->lowest_layer().cancel(ec); } void set_timeout(long seconds) noexcept { if(seconds == 0) { timer = nullptr; return; } timer = make_steady_timer(*socket, std::chrono::seconds(seconds)); std::weak_ptr<Connection> self_weak(this->shared_from_this()); // To avoid keeping Connection instance alive longer than needed timer->async_wait([self_weak](const error_code &ec) { if(!ec) { if(auto self = self_weak.lock()) self->close(); } }); } void cancel_timeout() noexcept { if(timer) { try { timer->cancel(); } catch(...) { } } } }; class Session { public: Session(std::size_t max_response_streambuf_size, std::shared_ptr<Connection> connection_, std::unique_ptr<asio::streambuf> request_streambuf_) noexcept : connection(std::move(connection_)), request_streambuf(std::move(request_streambuf_)), response(new Response(max_response_streambuf_size, connection)) {} std::shared_ptr<Connection> connection; std::unique_ptr<asio::streambuf> request_streambuf; std::shared_ptr<Response> response; std::function<void(const error_code &)> callback; }; public: /// Set before calling a request function. 
Config config; /// If you want to reuse an already created asio::io_service, store its pointer here before calling a request function. /// Do not set when using synchronous request functions. std::shared_ptr<io_context> io_service; /// Convenience function to perform synchronous request. The io_service is started in this function. /// Should not be combined with asynchronous request functions. /// If you reuse the io_service for other tasks, use the asynchronous request functions instead. /// When requesting Server-Sent Events: will throw on error::eof, please use asynchronous request functions instead. std::shared_ptr<Response> request(const std::string &method, const std::string &path = {"/"}, string_view content = {}, const CaseInsensitiveMultimap &header = CaseInsensitiveMultimap()) { return sync_request(method, path, content, header); } /// Convenience function to perform synchronous request. The io_service is started in this function. /// Should not be combined with asynchronous request functions. /// If you reuse the io_service for other tasks, use the asynchronous request functions instead. /// When requesting Server-Sent Events: will throw on error::eof, please use asynchronous request functions instead. std::shared_ptr<Response> request(const std::string &method, const std::string &path, std::istream &content, const CaseInsensitiveMultimap &header = CaseInsensitiveMultimap()) { return sync_request(method, path, content, header); } /// Asynchronous request where running Client's io_service is required. /// Do not use concurrently with the synchronous request functions. 
/// When requesting Server-Sent Events: request_callback might be called more than twice, first call with empty contents on open, and with ec = error::eof on last call void request(const std::string &method, const std::string &path, string_view content, const CaseInsensitiveMultimap &header, std::function<void(std::shared_ptr<Response>, const error_code &)> &&request_callback_) { auto session = std::make_shared<Session>(config.max_response_streambuf_size, get_connection(), create_request_header(method, path, header)); std::weak_ptr<Session> session_weak(session); // To avoid keeping session alive longer than needed auto request_callback = std::make_shared<std::function<void(std::shared_ptr<Response>, const error_code &)>>(std::move(request_callback_)); session->callback = [this, session_weak, request_callback](const error_code &ec) { if(auto session = session_weak.lock()) { if(session->response->content.end) { session->connection->cancel_timeout(); session->connection->in_use = false; } { LockGuard lock(this->connections_mutex); // Remove unused connections, but keep one open for HTTP persistent connection: std::size_t unused_connections = 0; for(auto it = this->connections.begin(); it != this->connections.end();) { if(ec && session->connection == *it) it = this->connections.erase(it); else if((*it)->in_use) ++it; else { ++unused_connections; if(unused_connections > 1) it = this->connections.erase(it); else ++it; } } } if(*request_callback) (*request_callback)(session->response, ec); } }; std::ostream write_stream(session->request_streambuf.get()); if(content.size() > 0) { auto header_it = header.find("Content-Length"); if(header_it == header.end()) { header_it = header.find("Transfer-Encoding"); if(header_it == header.end() || header_it->second != "chunked") write_stream << "Content-Length: " << content.size() << "\r\n"; } } write_stream << "\r\n"; write_stream.write(content.data(), static_cast<std::streamsize>(content.size())); connect(session); } /// 
Asynchronous request where running Client's io_service is required. /// Do not use concurrently with the synchronous request functions. /// When requesting Server-Sent Events: request_callback might be called more than twice, first call with empty contents on open, and with ec = error::eof on last call void request(const std::string &method, const std::string &path, string_view content, std::function<void(std::shared_ptr<Response>, const error_code &)> &&request_callback_) { request(method, path, content, CaseInsensitiveMultimap(), std::move(request_callback_)); } /// Asynchronous request where running Client's io_service is required. /// Do not use concurrently with the synchronous request functions. /// When requesting Server-Sent Events: request_callback might be called more than twice, first call with empty contents on open, and with ec = error::eof on last call void request(const std::string &method, const std::string &path, std::function<void(std::shared_ptr<Response>, const error_code &)> &&request_callback_) { request(method, path, std::string(), CaseInsensitiveMultimap(), std::move(request_callback_)); } /// Asynchronous request where running Client's io_service is required. /// Do not use concurrently with the synchronous request functions. /// When requesting Server-Sent Events: request_callback might be called more than twice, first call with empty contents on open, and with ec = error::eof on last call void request(const std::string &method, std::function<void(std::shared_ptr<Response>, const error_code &)> &&request_callback_) { request(method, std::string("/"), std::string(), CaseInsensitiveMultimap(), std::move(request_callback_)); } /// Asynchronous request where running Client's io_service is required. /// Do not use concurrently with the synchronous request functions. 
/// When requesting Server-Sent Events: request_callback might be called more than twice, first call with empty contents on open, and with ec = error::eof on last call void request(const std::string &method, const std::string &path, std::istream &content, const CaseInsensitiveMultimap &header, std::function<void(std::shared_ptr<Response>, const error_code &)> &&request_callback_) { auto session = std::make_shared<Session>(config.max_response_streambuf_size, get_connection(), create_request_header(method, path, header)); std::weak_ptr<Session> session_weak(session); // To avoid keeping session alive longer than needed auto request_callback = std::make_shared<std::function<void(std::shared_ptr<Response>, const error_code &)>>(std::move(request_callback_)); session->callback = [this, session_weak, request_callback](const error_code &ec) { if(auto session = session_weak.lock()) { if(session->response->content.end) { session->connection->cancel_timeout(); session->connection->in_use = false; } { LockGuard lock(this->connections_mutex); // Remove unused connections, but keep one open for HTTP persistent connection: std::size_t unused_connections = 0; for(auto it = this->connections.begin(); it != this->connections.end();) { if(ec && session->connection == *it) it = this->connections.erase(it); else if((*it)->in_use) ++it; else { ++unused_connections; if(unused_connections > 1) it = this->connections.erase(it); else ++it; } } } if(*request_callback) (*request_callback)(session->response, ec); } }; content.seekg(0, std::ios::end); auto content_length = content.tellg(); content.seekg(0, std::ios::beg); std::ostream write_stream(session->request_streambuf.get()); if(content_length > 0) { auto header_it = header.find("Content-Length"); if(header_it == header.end()) { header_it = header.find("Transfer-Encoding"); if(header_it == header.end() || header_it->second != "chunked") write_stream << "Content-Length: " << content_length << "\r\n"; } } write_stream << "\r\n"; 
if(content_length > 0) write_stream << content.rdbuf(); connect(session); } /// Asynchronous request where running Client's io_service is required. /// Do not use concurrently with the synchronous request functions. /// When requesting Server-Sent Events: request_callback might be called more than twice, first call with empty contents on open, and with ec = error::eof on last call void request(const std::string &method, const std::string &path, std::istream &content, std::function<void(std::shared_ptr<Response>, const error_code &)> &&request_callback_) { request(method, path, content, CaseInsensitiveMultimap(), std::move(request_callback_)); } /// Close connections. void stop() noexcept { LockGuard lock(connections_mutex); for(auto it = connections.begin(); it != connections.end();) { (*it)->close(); it = connections.erase(it); } } virtual ~ClientBase() noexcept { handler_runner->stop(); stop(); if(internal_io_service) io_service->stop(); } protected: bool internal_io_service = false; std::string host; unsigned short port; unsigned short default_port; std::unique_ptr<std::pair<std::string, std::string>> host_port; Mutex connections_mutex; std::unordered_set<std::shared_ptr<Connection>> connections GUARDED_BY(connections_mutex); std::shared_ptr<ScopeRunner> handler_runner; Mutex synchronous_request_mutex; bool synchronous_request_called GUARDED_BY(synchronous_request_mutex) = false; ClientBase(const std::string &host_port, unsigned short default_port) noexcept : default_port(default_port), handler_runner(new ScopeRunner()) { auto parsed_host_port = parse_host_port(host_port, default_port); host = parsed_host_port.first; port = parsed_host_port.second; } template <typename ContentType> std::shared_ptr<Response> sync_request(const std::string &method, const std::string &path, ContentType &content, const CaseInsensitiveMultimap &header) { { LockGuard lock(synchronous_request_mutex); if(!synchronous_request_called) { if(io_service) // Throw if io_service already set 
throw make_error_code::make_error_code(errc::operation_not_permitted); io_service = std::make_shared<io_context>(); internal_io_service = true; auto io_service_ = io_service; std::thread thread([io_service_] { auto work = make_work_guard(*io_service_); io_service_->run(); }); thread.detach(); synchronous_request_called = true; } } std::shared_ptr<Response> response; std::promise<std::shared_ptr<Response>> response_promise; auto stop_future_handlers = std::make_shared<bool>(false); request(method, path, content, header, [&response, &response_promise, stop_future_handlers](std::shared_ptr<Response> response_, error_code ec) { if(*stop_future_handlers) return; if(!response) response = response_; else if(!ec) { if(response_->streambuf.size() + response->streambuf.size() > response->streambuf.max_size()) { ec = make_error_code::make_error_code(errc::message_size); response->close(); } else { // Move partial response_ content to response: auto &source = response_->streambuf; auto &target = response->streambuf; target.commit(asio::buffer_copy(target.prepare(source.size()), source.data())); source.consume(source.size()); } } if(ec) { response_promise.set_exception(std::make_exception_ptr(system_error(ec))); *stop_future_handlers = true; } else if(response_->content.end) response_promise.set_value(response); }); return response_promise.get_future().get(); } std::shared_ptr<Connection> get_connection() noexcept { std::shared_ptr<Connection> connection; LockGuard lock(connections_mutex); if(!io_service) { io_service = std::make_shared<io_context>(); internal_io_service = true; } for(auto it = connections.begin(); it != connections.end(); ++it) { if(!(*it)->in_use) { connection = *it; break; } } if(!connection) { connection = create_connection(); connections.emplace(connection); } connection->attempt_reconnect = true; connection->in_use = true; if(!host_port) { if(config.proxy_server.empty()) host_port = std::unique_ptr<std::pair<std::string, std::string>>(new 
std::pair<std::string, std::string>(host, std::to_string(port))); else { auto proxy_host_port = parse_host_port(config.proxy_server, 8080); host_port = std::unique_ptr<std::pair<std::string, std::string>>(new std::pair<std::string, std::string>(proxy_host_port.first, std::to_string(proxy_host_port.second))); } } return connection; } std::pair<std::string, unsigned short> parse_host_port(const std::string &host_port, unsigned short default_port) const noexcept { std::pair<std::string, unsigned short> parsed_host_port; std::size_t host_end = host_port.find(':'); if(host_end == std::string::npos) { parsed_host_port.first = host_port; parsed_host_port.second = default_port; } else { parsed_host_port.first = host_port.substr(0, host_end); try { parsed_host_port.second = static_cast<unsigned short>(std::stoul(host_port.substr(host_end + 1))); } catch(...) { parsed_host_port.second = default_port; } } return parsed_host_port; } virtual std::shared_ptr<Connection> create_connection() noexcept = 0; virtual void connect(const std::shared_ptr<Session> &) = 0; std::unique_ptr<asio::streambuf> create_request_header(const std::string &method, const std::string &path, const CaseInsensitiveMultimap &header) const { auto corrected_path = path; if(corrected_path == "") corrected_path = "/"; if(!config.proxy_server.empty() && std::is_same<socket_type, asio::ip::tcp::socket>::value) corrected_path = "http://" + host + ':' + std::to_string(port) + corrected_path; std::unique_ptr<asio::streambuf> streambuf(new asio::streambuf()); std::ostream write_stream(streambuf.get()); write_stream << method << " " << corrected_path << " HTTP/1.1\r\n"; write_stream << "Host: " << host; if(port != default_port) write_stream << ':' << std::to_string(port); write_stream << "\r\n"; for(auto &h : header) write_stream << h.first << ": " << h.second << "\r\n"; return streambuf; } void write(const std::shared_ptr<Session> &session) { session->connection->set_timeout(config.timeout); 
asio::async_write(*session->connection->socket, session->request_streambuf->data(), [this, session](const error_code &ec, std::size_t /*bytes_transferred*/) { auto lock = session->connection->handler_runner->continue_lock(); if(!lock) return; if(!ec) this->read(session); else { if(session->connection->attempt_reconnect && ec != error::operation_aborted) reconnect(session, ec); else session->callback(ec); } }); } void read(const std::shared_ptr<Session> &session) { asio::async_read_until(*session->connection->socket, session->response->streambuf, HeaderEndMatch(), [this, session](const error_code &ec, std::size_t bytes_transferred) { auto lock = session->connection->handler_runner->continue_lock(); if(!lock) return; if(!ec) { session->connection->attempt_reconnect = true; std::size_t num_additional_bytes = session->response->streambuf.size() - bytes_transferred; if(!ResponseMessage::parse(session->response->content, session->response->http_version, session->response->status_code, session->response->header)) { session->callback(make_error_code::make_error_code(errc::protocol_error)); return; } auto header_it = session->response->header.find("Content-Length"); if(header_it != session->response->header.end()) { auto content_length = std::stoull(header_it->second); if(content_length > num_additional_bytes) this->read_content(session, content_length - num_additional_bytes); else session->callback(ec); } else if((header_it = session->response->header.find("Transfer-Encoding")) != session->response->header.end() && header_it->second == "chunked") { // Expect hex number to not exceed 16 bytes (64-bit number), but take into account previous additional read bytes auto chunk_size_streambuf = std::make_shared<asio::streambuf>(std::max<std::size_t>(16 + 2, session->response->streambuf.size())); // Move leftover bytes auto &source = session->response->streambuf; auto &target = *chunk_size_streambuf; target.commit(asio::buffer_copy(target.prepare(source.size()), source.data())); 
source.consume(source.size()); this->read_chunked_transfer_encoded(session, chunk_size_streambuf); } else if(session->response->http_version < "1.1" || ((header_it = session->response->header.find("Connection")) != session->response->header.end() && header_it->second == "close")) read_content(session); else if(((header_it = session->response->header.find("Content-Type")) != session->response->header.end() && header_it->second == "text/event-stream")) { auto events_streambuf = std::make_shared<asio::streambuf>(this->config.max_response_streambuf_size); // Move leftover bytes auto &source = session->response->streambuf; auto &target = *events_streambuf; target.commit(asio::buffer_copy(target.prepare(source.size()), source.data())); source.consume(source.size()); session->callback(ec); // Connection to a Server-Sent Events resource is opened this->read_server_sent_event(session, events_streambuf); } else session->callback(ec); } else { if(session->connection->attempt_reconnect && ec != error::operation_aborted) reconnect(session, ec); else session->callback(ec); } }); } void reconnect(const std::shared_ptr<Session> &session, const error_code &ec) { LockGuard lock(connections_mutex); auto it = connections.find(session->connection); if(it != connections.end()) { connections.erase(it); session->connection = create_connection(); session->connection->attempt_reconnect = false; session->connection->in_use = true; session->response = std::shared_ptr<Response>(new Response(this->config.max_response_streambuf_size, session->connection)); connections.emplace(session->connection); lock.unlock(); this->connect(session); } else { lock.unlock(); session->callback(ec); } } void read_content(const std::shared_ptr<Session> &session, std::size_t remaining_length) { asio::async_read(*session->connection->socket, session->response->streambuf, asio::transfer_exactly(remaining_length), [this, session, remaining_length](const error_code &ec, std::size_t bytes_transferred) { auto lock = 
session->connection->handler_runner->continue_lock();
    if(!lock)
      return;
    if(!ec) {
      // Buffer full but more body bytes remain: emit a partial response and
      // continue reading into a fresh Response object
      if(session->response->streambuf.size() == session->response->streambuf.max_size() && remaining_length > bytes_transferred) {
        session->response->content.end = false;
        session->callback(ec);
        session->response = std::shared_ptr<Response>(new Response(*session->response));
        this->read_content(session, remaining_length - bytes_transferred);
      }
      else
        session->callback(ec);
    }
    else
      session->callback(ec);
  });
}

/// Reads body content until the connection is closed (no Content-Length and
/// not chunked). EOF is the expected terminator and is mapped to success.
void read_content(const std::shared_ptr<Session> &session) {
  asio::async_read(*session->connection->socket, session->response->streambuf, [this, session](const error_code &ec_, std::size_t /*bytes_transferred*/) {
    auto lock = session->connection->handler_runner->continue_lock();
    if(!lock)
      return;
    // EOF signals end-of-body for close-delimited responses, not an error
    auto ec = ec_ == error::eof ? error_code() : ec_;
    if(!ec) {
      {
        // The connection was closed by the server; drop it from the pool
        // (inner lock intentionally shadows the handler-runner lock above)
        LockGuard lock(this->connections_mutex);
        this->connections.erase(session->connection);
      }
      if(session->response->streambuf.size() == session->response->streambuf.max_size()) {
        // Buffer full: deliver a partial response and keep reading
        session->response->content.end = false;
        session->callback(ec);
        session->response = std::shared_ptr<Response>(new Response(*session->response));
        this->read_content(session);
      }
      else
        session->callback(ec);
    }
    else
      session->callback(ec);
  });
}

/// Decodes one chunk of a chunked Transfer-Encoding body per invocation:
/// reads the hexadecimal chunk-size line from chunk_size_streambuf, moves or
/// reads the chunk data into the response streambuf, consumes the trailing
/// CRLF, and recurses until a zero-size chunk terminates the body.
void read_chunked_transfer_encoded(const std::shared_ptr<Session> &session, const std::shared_ptr<asio::streambuf> &chunk_size_streambuf) {
  asio::async_read_until(*session->connection->socket, *chunk_size_streambuf, "\r\n", [this, session, chunk_size_streambuf](const error_code &ec, size_t bytes_transferred) {
    auto lock = session->connection->handler_runner->continue_lock();
    if(!lock)
      return;
    if(!ec) {
      std::istream istream(chunk_size_streambuf.get());
      std::string line;
      std::getline(istream, line);
      bytes_transferred -= line.size() + 1;
      unsigned long chunk_size = 0;
      try {
        // Chunk size is a hexadecimal number
        chunk_size = std::stoul(line, 0, 16);
      }
      catch(...)
      {
        session->callback(make_error_code::make_error_code(errc::protocol_error));
        return;
      }
      if(chunk_size == 0) {
        // Zero-size chunk terminates the body
        session->callback(error_code());
        return;
      }
      if(chunk_size + session->response->streambuf.size() > session->response->streambuf.max_size()) {
        // Chunk would overflow the response buffer: deliver a partial
        // response and continue into a fresh Response object
        session->response->content.end = false;
        session->callback(ec);
        session->response = std::shared_ptr<Response>(new Response(*session->response));
      }
      // Bytes of chunk data that were already read along with the size line
      auto num_additional_bytes = chunk_size_streambuf->size() - bytes_transferred;

      auto bytes_to_move = std::min<std::size_t>(chunk_size, num_additional_bytes);
      if(bytes_to_move > 0) {
        // Move leftover bytes
        auto &source = *chunk_size_streambuf;
        auto &target = session->response->streambuf;
        target.commit(asio::buffer_copy(target.prepare(bytes_to_move), source.data(), bytes_to_move));
        source.consume(bytes_to_move);
      }

      if(chunk_size > num_additional_bytes) {
        // Remaining chunk data must be read from the socket
        asio::async_read(*session->connection->socket, session->response->streambuf, asio::transfer_exactly(chunk_size - num_additional_bytes), [this, session, chunk_size_streambuf](const error_code &ec, size_t /*bytes_transferred*/) {
          auto lock = session->connection->handler_runner->continue_lock();
          if(!lock)
            return;
          if(!ec) {
            // Remove "\r\n"
            auto null_buffer = std::make_shared<asio::streambuf>(2);
            asio::async_read(*session->connection->socket, *null_buffer, asio::transfer_exactly(2), [this, session, chunk_size_streambuf, null_buffer](const error_code &ec, size_t /*bytes_transferred*/) {
              auto lock = session->connection->handler_runner->continue_lock();
              if(!lock)
                return;
              if(!ec)
                read_chunked_transfer_encoded(session, chunk_size_streambuf);
              else
                session->callback(ec);
            });
          }
          else
            session->callback(ec);
        });
      }
      else if(2 + chunk_size > num_additional_bytes) { // If only end of chunk remains unread (\n or \r\n)
        // Remove "\r\n"
        if(2 + chunk_size - num_additional_bytes == 1)
          istream.get();
        auto null_buffer = std::make_shared<asio::streambuf>(2);
        asio::async_read(*session->connection->socket, *null_buffer, asio::transfer_exactly(2 + chunk_size - num_additional_bytes),
                         [this, session, chunk_size_streambuf, null_buffer](const error_code &ec, size_t /*bytes_transferred*/) {
                           auto lock = session->connection->handler_runner->continue_lock();
                           if(!lock)
                             return;
                           if(!ec)
                             read_chunked_transfer_encoded(session, chunk_size_streambuf);
                           else
                             session->callback(ec);
                         });
      }
      else {
        // Remove "\r\n"
        istream.get();
        istream.get();
        read_chunked_transfer_encoded(session, chunk_size_streambuf);
      }
    }
    else
      session->callback(ec);
  });
}

/// Reads one Server-Sent Events block (terminated by an empty line), copies
/// its lines (sans trailing '\r') into the response streambuf, delivers it as
/// a partial response, and recurses to await the next event.
void read_server_sent_event(const std::shared_ptr<Session> &session, const std::shared_ptr<asio::streambuf> &events_streambuf) {
  asio::async_read_until(*session->connection->socket, *events_streambuf, HeaderEndMatch(), [this, session, events_streambuf](const error_code &ec, std::size_t /*bytes_transferred*/) {
    auto lock = session->connection->handler_runner->continue_lock();
    if(!lock)
      return;
    if(!ec) {
      session->response->content.end = false;
      std::istream istream(events_streambuf.get());
      std::ostream ostream(&session->response->streambuf);
      std::string line;
      // Copy event lines until the blank line ending the event block
      while(std::getline(istream, line) && !line.empty() && !(line.back() == '\r' && line.size() == 1)) {
        ostream.write(line.data(), static_cast<std::streamsize>(line.size() - (line.back() == '\r' ? 1 : 0)));
        ostream.put('\n');
      }
      session->callback(ec);
      session->response = std::shared_ptr<Response>(new Response(*session->response));
      read_server_sent_event(session, events_streambuf);
    }
    else
      session->callback(ec);
  });
}
};

template <class socket_type>
class Client : public ClientBase<socket_type> {};

using HTTP = asio::ip::tcp::socket;

/// Plain HTTP client specialization.
template <>
class Client<HTTP> : public ClientBase<HTTP> {
public:
  /**
   * Constructs a client object.
   *
   * @param server_port_path Server resource given by host[:port][/path]
   */
  Client(const std::string &server_port_path) noexcept : ClientBase<HTTP>::ClientBase(server_port_path, 80) {}

protected:
  /// Creates a plain TCP connection bound to the client's io_service.
  std::shared_ptr<Connection> create_connection() noexcept override {
    return std::make_shared<Connection>(handler_runner, *io_service);
  }

  /// Resolves host_port and connects the socket if it is not already open,
  /// enabling TCP no-delay before sending the request. An already open
  /// connection proceeds directly to write().
  void connect(const std::shared_ptr<Session> &session) override {
    if(!session->connection->socket->lowest_layer().is_open()) {
      auto resolver = std::make_shared<asio::ip::tcp::resolver>(*io_service);
      session->connection->set_timeout(config.timeout_connect);
      async_resolve(*resolver, *host_port, [this, session, resolver](const error_code &ec, resolver_results results) {
        session->connection->cancel_timeout();
        auto lock = session->connection->handler_runner->continue_lock();
        if(!lock)
          return;
        if(!ec) {
          session->connection->set_timeout(config.timeout_connect);
          asio::async_connect(*session->connection->socket, results, [this, session, resolver](const error_code &ec, async_connect_endpoint /*endpoint*/) {
            session->connection->cancel_timeout();
            auto lock = session->connection->handler_runner->continue_lock();
            if(!lock)
              return;
            if(!ec) {
              asio::ip::tcp::no_delay option(true);
              error_code ec; // Shadowed on purpose: set_option failure is ignored
              session->connection->socket->set_option(option, ec);
              this->write(session);
            }
            else
              session->callback(ec);
          });
        }
        else
          session->callback(ec);
      });
    }
    else
      write(session);
  }
};
} // namespace SimpleWeb

#endif /* SIMPLE_WEB_CLIENT_HTTP_HPP */


================================================
FILE: C/thirdparty/Simple-Web-Server/client_https.hpp
================================================
#ifndef SIMPLE_WEB_CLIENT_HTTPS_HPP
#define SIMPLE_WEB_CLIENT_HTTPS_HPP

#include "client_http.hpp"

#ifdef USE_STANDALONE_ASIO
#include <asio/ssl.hpp>
#else
#include <boost/asio/ssl.hpp>
#endif

namespace SimpleWeb {
  using HTTPS = asio::ssl::stream<asio::ip::tcp::socket>;

  /// HTTPS client specialization: wraps the TCP socket in a TLS stream and
  /// supports tunneling through an HTTP proxy via CONNECT.
  template <>
  class Client<HTTPS> : public ClientBase<HTTPS> {
  public:
    /**
     * Constructs a client object.
      /// Returns Base64 encoded string from input string.
      /// On a BIO write/flush failure the returned string is empty.
      static std::string encode(const std::string &input) noexcept {
        std::string base64;

        BIO *bio, *b64;
        BUF_MEM *bptr = BUF_MEM_new();

        b64 = BIO_new(BIO_f_base64());
        BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL);
        bio = BIO_new(BIO_s_mem());
        BIO_push(b64, bio);
        BIO_set_mem_buf(b64, bptr, BIO_CLOSE);

        // Write directly to base64-buffer to avoid copy
        auto base64_length = static_cast<std::size_t>(round(4 * ceil(static_cast<double>(input.size()) / 3.0)));
        base64.resize(base64_length);
        bptr->length = 0;
        bptr->max = base64_length + 1;
        bptr->data = &base64[0];

        if(BIO_write(b64, &input[0], static_cast<int>(input.size())) <= 0 || BIO_flush(b64) <= 0)
          base64.clear();

        // To keep &base64[0] through BIO_free_all(b64)
        // (detach the aliased buffer before OpenSSL tries to free it)
        bptr->length = 0;
        bptr->max = 0;
        bptr->data = nullptr;

        BIO_free_all(b64);

        return base64;
      }

      /// Returns Base64 decoded string from base64 input.
      /// On a BIO read failure the returned string is empty.
      static std::string decode(const std::string &base64) noexcept {
        std::string ascii;

        // Resize ascii, however, the size is a up to two bytes too large.
        ascii.resize((6 * base64.size()) / 8);
        BIO *b64, *bio;

        b64 = BIO_new(BIO_f_base64());
        BIO_set_flags(b64, BIO_FLAGS_BASE64_NO_NL);
// TODO: Remove in 2022 or later
#if(defined(OPENSSL_VERSION_NUMBER) && OPENSSL_VERSION_NUMBER < 0x1000214fL) || (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x2080000fL)
        bio = BIO_new_mem_buf(const_cast<char *>(&base64[0]), static_cast<int>(base64.size()));
#else
        bio = BIO_new_mem_buf(&base64[0], static_cast<int>(base64.size()));
#endif
        bio = BIO_push(b64, bio);

        auto decoded_length = BIO_read(bio, &ascii[0], static_cast<int>(ascii.size()));
        if(decoded_length > 0)
          ascii.resize(static_cast<std::size_t>(decoded_length));
        else
          ascii.clear();

        BIO_free_all(b64);

        return ascii;
      }
    };

    /// Returns hex string from bytes in input string.
    static std::string to_hex_string(const std::string &input) noexcept {
      std::stringstream hex_stream;
      hex_stream << std::hex << std::internal << std::setfill('0');
      for(auto &byte : input)
        hex_stream << std::setw(2) << static_cast<int>(static_cast<unsigned char>(byte));
      return hex_stream.str();
    }

    /// Returns md5 hash value from input string.
    /// iterations > 1 re-hashes the digest that many times in total.
    static std::string md5(const std::string &input, std::size_t iterations = 1) noexcept {
      std::string hash;

      hash.resize(128 / 8);
      MD5(reinterpret_cast<const unsigned char *>(&input[0]), input.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      for(std::size_t c = 1; c < iterations; ++c)
        MD5(reinterpret_cast<const unsigned char *>(&hash[0]), hash.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      return hash;
    }

    /// Returns md5 hash value from input stream.
    /// The stream is consumed in buffer_size blocks.
    static std::string md5(std::istream &stream, std::size_t iterations = 1) noexcept {
      MD5_CTX context;
      MD5_Init(&context);
      std::streamsize read_length;
      std::vector<char> buffer(buffer_size);
      while((read_length = stream.read(&buffer[0], buffer_size).gcount()) > 0)
        MD5_Update(&context, buffer.data(), static_cast<std::size_t>(read_length));
      std::string hash;
      hash.resize(128 / 8);
      MD5_Final(reinterpret_cast<unsigned char *>(&hash[0]), &context);

      for(std::size_t c = 1; c < iterations; ++c)
        MD5(reinterpret_cast<const unsigned char *>(&hash[0]), hash.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      return hash;
    }

    /// Returns sha1 hash value from input string.
    /// iterations > 1 re-hashes the digest that many times in total.
    static std::string sha1(const std::string &input, std::size_t iterations = 1) noexcept {
      std::string hash;

      hash.resize(160 / 8);
      SHA1(reinterpret_cast<const unsigned char *>(&input[0]), input.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      for(std::size_t c = 1; c < iterations; ++c)
        SHA1(reinterpret_cast<const unsigned char *>(&hash[0]), hash.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      return hash;
    }

    /// Returns sha1 hash value from input stream.
    /// The stream is consumed in buffer_size blocks.
    static std::string sha1(std::istream &stream, std::size_t iterations = 1) noexcept {
      SHA_CTX context;
      SHA1_Init(&context);
      std::streamsize read_length;
      std::vector<char> buffer(buffer_size);
      while((read_length = stream.read(&buffer[0], buffer_size).gcount()) > 0)
        SHA1_Update(&context, buffer.data(), static_cast<std::size_t>(read_length));
      std::string hash;
      hash.resize(160 / 8);
      SHA1_Final(reinterpret_cast<unsigned char *>(&hash[0]), &context);

      for(std::size_t c = 1; c < iterations; ++c)
        SHA1(reinterpret_cast<const unsigned char *>(&hash[0]), hash.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      return hash;
    }

    /// Returns sha256 hash value from input string.
    /// iterations > 1 re-hashes the digest that many times in total.
    static std::string sha256(const std::string &input, std::size_t iterations = 1) noexcept {
      std::string hash;

      hash.resize(256 / 8);
      SHA256(reinterpret_cast<const unsigned char *>(&input[0]), input.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      for(std::size_t c = 1; c < iterations; ++c)
        SHA256(reinterpret_cast<const unsigned char *>(&hash[0]), hash.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      return hash;
    }

    /// Returns sha256 hash value from input stream.
    /// The stream is consumed in buffer_size blocks.
    static std::string sha256(std::istream &stream, std::size_t iterations = 1) noexcept {
      SHA256_CTX context;
      SHA256_Init(&context);
      std::streamsize read_length;
      std::vector<char> buffer(buffer_size);
      while((read_length = stream.read(&buffer[0], buffer_size).gcount()) > 0)
        SHA256_Update(&context, buffer.data(), static_cast<std::size_t>(read_length));
      std::string hash;
      hash.resize(256 / 8);
      SHA256_Final(reinterpret_cast<unsigned char *>(&hash[0]), &context);

      for(std::size_t c = 1; c < iterations; ++c)
        SHA256(reinterpret_cast<const unsigned char *>(&hash[0]), hash.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      return hash;
    }

    /// Returns sha512 hash value from input string.
    /// iterations > 1 re-hashes the digest that many times in total.
    static std::string sha512(const std::string &input, std::size_t iterations = 1) noexcept {
      std::string hash;

      hash.resize(512 / 8);
      SHA512(reinterpret_cast<const unsigned char *>(&input[0]), input.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      for(std::size_t c = 1; c < iterations; ++c)
        SHA512(reinterpret_cast<const unsigned char *>(&hash[0]), hash.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      return hash;
    }

    /// Returns sha512 hash value from input stream.
    /// The stream is consumed in buffer_size blocks.
    static std::string sha512(std::istream &stream, std::size_t iterations = 1) noexcept {
      SHA512_CTX context;
      SHA512_Init(&context);
      std::streamsize read_length;
      std::vector<char> buffer(buffer_size);
      while((read_length = stream.read(&buffer[0], buffer_size).gcount()) > 0)
        SHA512_Update(&context, buffer.data(), static_cast<std::size_t>(read_length));
      std::string hash;
      hash.resize(512 / 8);
      SHA512_Final(reinterpret_cast<unsigned char *>(&hash[0]), &context);

      for(std::size_t c = 1; c < iterations; ++c)
        SHA512(reinterpret_cast<const unsigned char *>(&hash[0]), hash.size(), reinterpret_cast<unsigned char *>(&hash[0]));

      return hash;
    }

    /// Returns PBKDF2 hash value from the given password
    /// Input parameter key_size number of bytes of the returned key.

    /**
     * Returns PBKDF2 derived key from the given password.
     *
     * @param password   The password to derive key from.
     * @param salt       The salt to be used in the algorithm.
     * @param iterations Number of iterations to be used in the algorithm.
     * @param key_size   Number of bytes of the returned key.
     *
     * @return The PBKDF2 derived key.
*/ static std::string pbkdf2(const std::string &password, const std::string &salt, int iterations, int key_size) noexcept { std::string key; key.resize(static_cast<std::size_t>(key_size)); PKCS5_PBKDF2_HMAC_SHA1(password.c_str(), password.size(), reinterpret_cast<const unsigned char *>(salt.c_str()), salt.size(), iterations, key_size, reinterpret_cast<unsigned char *>(&key[0])); return key; } }; } // namespace SimpleWeb #endif /* SIMPLE_WEB_CRYPTO_HPP */ ================================================ FILE: C/thirdparty/Simple-Web-Server/docs/Doxyfile ================================================ # Doxyfile 1.8.15 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. # # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the configuration # file that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # https://www.gnu.org/software/libiconv/ for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. 
This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = "Simple-Web-Server" # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = # With the PROJECT_LOGO tag one can specify a logo or an icon that is included # in the documentation. The maximum height of the logo should not exceed 55 # pixels and the maximum width should not exceed 200 pixels. Doxygen will copy # the logo to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = docs/doxygen_output # If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode # U+3044. # The default value is: NO. 
ALLOW_UNICODE_NAMES = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, # Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # The OUTPUT_TEXT_DIRECTION tag is used to specify the direction in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all generated output in the proper direction. # Possible values are: None, LTR, RTL and Context. # The default value is: None. OUTPUT_TEXT_DIRECTION = None # If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. 
Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = YES # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. 
STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. If left blank only the name of # the header file containing the class definition is used. Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful if your file system doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that Rational Rose comments are # not recognized any more. # The default value is: NO. 
MULTILINE_CPP_IS_BRIEF = YES # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new # page for each member. If set to NO, the documentation of a member will be part # of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines (in the resulting output). You can put ^^ in the value part of an # alias to insert a newline as if a physical newline was in the original file. # When you need a literal { or } or , in the value part of an alias you have to # escape them by means of a backslash (\), this can lead to conflicts with the # commands \{ and \} for these it is advised to use the version @{ and @} or use # a double escape (\\{ and \\}) ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. 
For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. OPTIMIZE_OUTPUT_VHDL = NO # Set the OPTIMIZE_OUTPUT_SLICE tag to YES if your project consists of Slice # sources only. Doxygen will then generate output that is more tailored for that # language. For instance, namespaces will be presented as modules, types will be # separated into more groups, etc. # The default value is: NO. OPTIMIZE_OUTPUT_SLICE = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, # Csharp (C#), C, C++, D, PHP, md (Markdown), Objective-C, Python, Slice, # Fortran (fixed format Fortran: FortranFixed, free formatted Fortran: # FortranFree, unknown formatted Fortran: Fortran. In the latter case the parser # tries to guess whether the code is fixed or free formatted code, this is the # default for Fortran type files), VHDL, tcl. 
For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is # Fortran), use: inc=Fortran f=C. # # Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See https://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibility issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When the TOC_INCLUDE_HEADINGS tag is set to a non-zero value, all headings up # to that level are automatically included in the table of contents, even if # they do not have an id attribute. # Note: This feature currently applies only to Markdown headings. # Minimum value: 0, maximum value: 99, default value: 0. # This tag requires that the tag MARKDOWN_SUPPORT is set to YES. TOC_INCLUDE_HEADINGS = 0 # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by putting a % sign in front of the word or # globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match function declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). 
This also makes the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # https://www.riverbankcomputing.com/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # If one adds a struct or class to a group and this option is enabled, then also # any nested class or struct is added to the same group. By default this option # is disabled and one has to add nested compounds explicitly via \ingroup. # The default value is: NO. GROUP_NESTED_COMPOUNDS = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). 
Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = NO # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. 
If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. # Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined # locally in source files will be included in the documentation. If set to NO, # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. 
If set to YES, local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO, only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO, these classes will be included in the various overviews. This option # has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # (class|struct|union) declarations. If set to NO, these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO, these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. 
Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. If set to YES, upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. # The default value is: system dependent. CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES, the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will # append additional text to a page's title, such as Class Reference. If set to # YES the compound reference will be hidden. # The default value is: NO. HIDE_COMPOUND_REFERENCE= NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each # grouped member an include statement to the documentation, telling the reader # which file to include in order to use the member. # The default value is: NO. SHOW_GROUPED_MEMB_INC = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. 
INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO, the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. 
SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo # list. This list is created by putting \todo commands in the documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test # list. This list is created by putting \test commands in the documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if <section_label> ... \endif and \cond <section_label> # ... \endcond blocks. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. 
The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. If set to YES, the # list will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. 
You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. This requires the bibtex tool # to be installed. See also https://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. See also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error (stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. 
WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO, doxygen will only warn about wrong or incomplete # parameter documentation, but not about the absence of documentation. If # EXTRACT_ALL is set to YES then this flag will automatically be disabled. # The default value is: NO. WARN_NO_PARAMDOC = NO # If the WARN_AS_ERROR tag is set to YES then doxygen will immediately stop when # a warning is encountered. # The default value is: NO. WARN_AS_ERROR = NO # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. 
You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. INPUT = # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: https://www.gnu.org/software/libiconv/) for the list of # possible encodings. # The default value is: UTF-8. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # read by doxygen. # # If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, # *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, # *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, # *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.pyw, *.f90, *.f95, *.f03, *.f08, # *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, *.qsf and *.ice. FILE_PATTERNS = *.c \ *.cc \ *.cxx \ *.cpp \ *.c++ \ *.java \ *.ii \ *.ixx \ *.ipp \ *.i++ \ *.inl \ *.idl \ *.ddl \ *.odl \ *.h \ *.hh \ *.hxx \ *.hpp \ *.h++ \ *.cs \ *.d \ *.php \ *.php4 \ *.php5 \ *.phtml \ *.inc \ *.m \ *.markdown \ *.md \ *.mm \ *.dox \ *.py \ *.pyw \ *.f90 \ *.f95 \ *.f03 \ *.f08 \ *.f \ *.for \ *.tcl \ *.vhd \ *.vhdl \ *.ucf \ *.qsf \ *.ice # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. 
RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank all # files are included. 
EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. # # Note that for custom extensions or not directly supported extensions you also # need to set EXTENSION_MAPPING for the extension otherwise the files are not # properly processed by doxygen. 
FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = README.md #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. 
STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # entity all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see https://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. # # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the configuration file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). 
# # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. # The default value is: YES. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all # compounds will be generated. Enable this if the project contains a lot of # classes, structs, unions or interfaces. # The default value is: YES. ALPHABETICAL_INDEX = YES # The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in # which the alphabetical index list will be split. # Minimum value: 1, maximum value: 20, default value: 5. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: YES. 
GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. # # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. # Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. 
# This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. # Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined # cascading style sheets that are included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet files to the output directory. # Note: The order of the extra style sheet files is of importance (e.g. the last # style sheet in the list overrules the setting of the previous ones in the # list). For an example see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # https://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to YES can help to show when doxygen was last run and thus if the # documentation is up to date. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_TIMESTAMP = NO # If the HTML_DYNAMIC_MENUS tag is set to YES then the generated HTML # documentation will contain a main index with vertical navigation menus that # are dynamically created via Javascript. If disabled, the navigation index will # consists of multiple levels of tabs that are statically embedded in every HTML # page. Disable this option to support browsers that do not have Javascript, # like the Qt help browser. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_MENUS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). So setting the number of # entries 1 will produce a full collapsed tree by default. 0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: https://developer.apple.com/xcode/), introduced with OSX # 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. 
Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See https://developer.apple.com/library/archive/featuredarticles/Doxy # genXcode/_index.html for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. 
The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # (see: https://www.microsoft.com/en-us/download/details.aspx?id=21138) on # Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler (hhc.exe). If non-empty, # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated # (YES) or that it should be included in the master .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated # (YES) or a normal table of contents (NO) in the .chm file. 
Furthermore it # enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. # This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace # (see: http://doc.qt.io/archives/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: http://doc.qt.io/archives/qt-4.8/qthelpproject.html#virtual- # folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. 
For more information please see Qt Help Project / Custom # Filters (see: http://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://doc.qt.io/archives/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # http://doc.qt.io/archives/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location of Qt's # qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the # generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. 
# The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = NO # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has # the same information as the tab index, you could consider setting # DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = NO # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. 
# # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. # This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML # output directory to force them to be regenerated. # Minimum value: 8, maximum value: 50, default value: 10. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # # Note that when changing this option you need to delete any form_*.png files in # the HTML output directory before the changes have effect. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # https://www.mathjax.org) which uses client side Javascript for the rendering # instead of using pre-rendered bitmaps. 
Use this if you do not have LaTeX # installed or if you want formulas to look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: # http://docs.mathjax.org/en/latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the HTML # output directory using the MATHJAX_RELPATH option. The destination directory # should contain the MathJax.js script. For instance, if the mathjax directory # is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. However, it is strongly recommended to install a local copy of # MathJax from https://www.mathjax.org before deployment. # The default value is: https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/ # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # This tag requires that the tag USE_MATHJAX is set to YES. 
MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site # (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box for # the HTML output. The underlying search engine uses javascript and DHTML and # should work on any modern browser. Note that when using HTML help # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) # there is already a search function so this one should typically be disabled. # For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to # search using the keyboard; to jump to the search box use <access key> + S # (what the <access key> is depends on the OS and browser, but it is typically # <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down # key> to jump into the search results window, the results can be navigated # using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel # the search. The filter options can be selected when the cursor is inside the # search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys> # to select a filter and <Enter> or <escape> to activate or cancel the filter # option. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be # implemented using a web server instead of a web client using Javascript. There # are two flavors of web server based searching depending on the EXTERNAL_SEARCH # setting. 
When disabled, doxygen will generate a PHP script for searching and # an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing # and searching needs to be provided by external tools. See the section # "External Indexing and Searching" for details. # The default value is: NO. # This tag requires that the tag SEARCHENGINE is set to YES. SERVER_BASED_SEARCH = NO # When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP # script for searching. Instead the search results are written to an XML file # which needs to be processed by an external indexer. Doxygen will invoke an # external search engine pointed to by the SEARCHENGINE_URL option to obtain the # search results. # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: https://xapian.org/). # # See the section "External Indexing and Searching" for details. # The default value is: NO. # This tag requires that the tag SEARCHENGINE is set to YES. EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will return the search results when EXTERNAL_SEARCH is enabled. # # Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: https://xapian.org/). See the section "External Indexing and # Searching" for details. # This tag requires that the tag SEARCHENGINE is set to YES. SEARCHENGINE_URL = # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed # search data is written to a file for indexing by an external tool. With the # SEARCHDATA_FILE tag the name of this file can be specified. # The default file is: searchdata.xml. # This tag requires that the tag SEARCHENGINE is set to YES. 
SEARCHDATA_FILE = searchdata.xml # When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the # EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is # useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple # projects and redirect the results back to the right project. # This tag requires that the tag SEARCHENGINE is set to YES. EXTERNAL_SEARCH_ID = # The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen # projects other than the one defined by this configuration file, but that are # all added to the same external search index. Each project needs to have a # unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id # to a relative location where the documentation can be found. The format is: # EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ... # This tag requires that the tag SEARCHENGINE is set to YES. EXTRA_SEARCH_MAPPINGS = #--------------------------------------------------------------------------- # Configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output. # The default value is: YES. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: latex. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. # # Note that when not enabling USE_PDFLATEX the default is latex when enabling # USE_PDFLATEX the default is pdflatex and when in the latter case latex is # chosen this is overwritten by pdflatex. For specific output languages the # default can have been set differently, this depends on the implementation of # the output language. 
# This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_CMD_NAME = # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate # index for LaTeX. # Note: This tag is used in the Makefile / make.bat. # See also: LATEX_MAKEINDEX_CMD for the part in the generated output file # (.tex). # The default file is: makeindex. # This tag requires that the tag GENERATE_LATEX is set to YES. MAKEINDEX_CMD_NAME = makeindex # The LATEX_MAKEINDEX_CMD tag can be used to specify the command name to # generate index for LaTeX. In case there is no backslash (\) as first character # it will be automatically added in the LaTeX code. # Note: This tag is used in the generated output file (.tex). # See also: MAKEINDEX_CMD_NAME for the part in the Makefile / make.bat. # The default value is: makeindex. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_MAKEINDEX_CMD = makeindex # If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used by the # printer. # Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x # 14 inches) and executive (7.25 x 10.5 inches). # The default value is: a4. # This tag requires that the tag GENERATE_LATEX is set to YES. PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names # that should be included in the LaTeX output. The package can be specified just # by its name or with the correct syntax as to be used with the LaTeX # \usepackage command. 
To get the times font for instance you can specify : # EXTRA_PACKAGES=times or EXTRA_PACKAGES={times} # To use the option intlimits with the amsmath package you can specify: # EXTRA_PACKAGES=[intlimits]{amsmath} # If left blank no extra packages will be included. # This tag requires that the tag GENERATE_LATEX is set to YES. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for the # generated LaTeX document. The header should contain everything until the first # chapter. If it is left blank doxygen will generate a standard header. See # section "Doxygen usage" for information on how to let doxygen write the # default header to a separate file. # # Note: Only use a user-defined header if you know what you are doing! The # following commands have a special meaning inside the header: $title, # $datetime, $date, $doxygenversion, $projectname, $projectnumber, # $projectbrief, $projectlogo. Doxygen will replace $title with the empty # string, for the replacement values of the other commands the user is referred # to HTML_HEADER. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HEADER = # The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the # generated LaTeX document. The footer should contain everything after the last # chapter. If it is left blank doxygen will generate a standard footer. See # LATEX_HEADER for more information on how to generate a default footer and what # special commands can be used inside the footer. # # Note: Only use a user-defined footer if you know what you are doing! # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_FOOTER = # The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined # LaTeX style sheets that are included after the standard style sheets created # by doxygen. Using this option one can overrule certain style aspects. Doxygen # will copy the style sheet files to the output directory. 
# Note: The order of the extra style sheet files is of importance (e.g. the last # style sheet in the list overrules the setting of the previous ones in the # list). # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_EXTRA_STYLESHEET = # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the LATEX_OUTPUT output # directory. Note that the files will be copied as-is; there are no commands or # markers available. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_EXTRA_FILES = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is # prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will # contain links (just like the HTML output) instead of page references. This # makes the output suitable for online browsing using a PDF viewer. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate # the PDF file directly from the LaTeX files. Set this option to YES, to get a # higher quality PDF documentation. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode # command to the generated LaTeX files. This will instruct LaTeX to keep running # if errors occur, instead of asking the user for help. This option is also used # when generating formulas in HTML. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_BATCHMODE = NO # If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the # index chapters (such as File Index, Compound Index, etc.) in the output. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. 
LATEX_HIDE_INDICES = NO # If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source # code with syntax highlighting in the LaTeX output. # # Note that which sources are shown also depends on other settings such as # SOURCE_BROWSER. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_SOURCE_CODE = NO # The LATEX_BIB_STYLE tag can be used to specify the style to use for the # bibliography, e.g. plainnat, or ieeetr. See # https://en.wikipedia.org/wiki/BibTeX and \cite for more info. # The default value is: plain. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_BIB_STYLE = plain # If the LATEX_TIMESTAMP tag is set to YES then the footer of each generated # page will contain the date and time when the page was generated. Setting this # to NO can help when comparing the output of multiple runs. # The default value is: NO. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_TIMESTAMP = NO # The LATEX_EMOJI_DIRECTORY tag is used to specify the (relative or absolute) # path from which the emoji images will be read. If a relative path is entered, # it will be relative to the LATEX_OUTPUT directory. If left blank the # LATEX_OUTPUT directory will be used. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_EMOJI_DIRECTORY = #--------------------------------------------------------------------------- # Configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The # RTF output is optimized for Word 97 and may not look too pretty with other RTF # readers/editors. # The default value is: NO. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: rtf. 
# This tag requires that the tag GENERATE_RTF is set to YES. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. # This tag requires that the tag GENERATE_RTF is set to YES. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will # contain hyperlink fields. The RTF file will contain links (just like the HTML # output) instead of page references. This makes the output suitable for online # browsing using Word or some other Word compatible readers that support those # fields. # # Note: WordPad (write) and others do not support links. # The default value is: NO. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # configuration file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. # # See also section "Doxygen usage" for information on how to generate the # default style sheet that doxygen normally uses. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an RTF document. Syntax is # similar to doxygen's configuration file. A template extensions file can be # generated using doxygen -e rtf extensionFile. # This tag requires that the tag GENERATE_RTF is set to YES. RTF_EXTENSIONS_FILE = # If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code # with syntax highlighting in the RTF output. # # Note that which sources are shown also depends on other settings such as # SOURCE_BROWSER. # The default value is: NO. # This tag requires that the tag GENERATE_RTF is set to YES. 
RTF_SOURCE_CODE = NO #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for # classes and files. # The default value is: NO. GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. A directory man3 will be created inside the directory specified by # MAN_OUTPUT. # The default directory is: man. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to the generated # man pages. In case the manual section does not start with a number, the number # 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is # optional. # The default value is: .3. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_EXTENSION = .3 # The MAN_SUBDIR tag determines the name of the directory created within # MAN_OUTPUT in which the man pages are placed. It defaults to man followed by # MAN_EXTENSION with the initial . removed. # This tag requires that the tag GENERATE_MAN is set to YES. MAN_SUBDIR = # If the MAN_LINKS tag is set to YES and doxygen generates man output, then it # will generate one additional man file for each entity documented in the real # man page(s). These additional files only source the real man page, but without # them the man command would be unable to find the correct page. # The default value is: NO. # This tag requires that the tag GENERATE_MAN is set to YES. 
MAN_LINKS = NO #--------------------------------------------------------------------------- # Configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that # captures the structure of the code including all documentation. # The default value is: NO. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: xml. # This tag requires that the tag GENERATE_XML is set to YES. XML_OUTPUT = xml # If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program # listings (including syntax highlighting and cross-referencing information) to # the XML output. Note that enabling this will significantly increase the size # of the XML output. # The default value is: YES. # This tag requires that the tag GENERATE_XML is set to YES. XML_PROGRAMLISTING = YES # If the XML_NS_MEMB_FILE_SCOPE tag is set to YES, doxygen will include # namespace members in file scope as well, matching the HTML output. # The default value is: NO. # This tag requires that the tag GENERATE_XML is set to YES. XML_NS_MEMB_FILE_SCOPE = NO #--------------------------------------------------------------------------- # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- # If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files # that can be used to generate PDF. # The default value is: NO. GENERATE_DOCBOOK = NO # The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be put in # front of it. # The default directory is: docbook. # This tag requires that the tag GENERATE_DOCBOOK is set to YES. 
DOCBOOK_OUTPUT = docbook # If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the # program listings (including syntax highlighting and cross-referencing # information) to the DOCBOOK output. Note that enabling this will significantly # increase the size of the DOCBOOK output. # The default value is: NO. # This tag requires that the tag GENERATE_DOCBOOK is set to YES. DOCBOOK_PROGRAMLISTING = NO #--------------------------------------------------------------------------- # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an # AutoGen Definitions (see http://autogen.sourceforge.net/) file that captures # the structure of the code including all documentation. Note that this feature # is still experimental and incomplete at the moment. # The default value is: NO. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # Configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module # file that captures the structure of the code including all documentation. # # Note that this feature is still experimental and incomplete at the moment. # The default value is: NO. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary # Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI # output from the Perl module output. # The default value is: NO. # This tag requires that the tag GENERATE_PERLMOD is set to YES. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely # formatted so it can be parsed by a human reader. This is useful if you want to # understand what is going on. 
On the other hand, if this tag is set to NO, the # size of the Perl module output will be much smaller and Perl will parse it # just the same. # The default value is: YES. # This tag requires that the tag GENERATE_PERLMOD is set to YES. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file are # prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful # so different doxyrules.make files included by the same Makefile don't # overwrite each other's variables. # This tag requires that the tag GENERATE_PERLMOD is set to YES. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all # C-preprocessor directives found in the sources and include files. # The default value is: YES. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names # in the source code. If set to NO, only conditional compilation will be # performed. Macro expansion can be done in a controlled way by setting # EXPAND_ONLY_PREDEF to YES. # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then # the macro expansion is limited to the macros specified with the PREDEFINED and # EXPAND_AS_DEFINED tags. # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES, the include files in the # INCLUDE_PATH will be searched if a #include is found. # The default value is: YES. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. 
SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by the # preprocessor. # This tag requires that the tag SEARCH_INCLUDES is set to YES. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will be # used. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that are # defined before the preprocessor is started (similar to the -D option of e.g. # gcc). The argument of the tag is a list of macros of the form: name or # name=definition (no spaces). If the definition and the "=" are omitted, "=1" # is assumed. To prevent a macro definition from being undefined via #undef or # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The # macro definition that is found in the sources will be used. Use the PREDEFINED # tag if you want to use a different macro definition that overrules the # definition found in the source code. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will # remove all references to function-like macros that are alone on a line, have # an all uppercase name, and do not end with a semicolon. Such function macros # are typically used for boiler-plate code, and will confuse the parser if not # removed. # The default value is: YES. 
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration options related to external references #--------------------------------------------------------------------------- # The TAGFILES tag can be used to specify one or more tag files. For each tag # file the location of the external documentation should be added. The format of # a tag file without this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where loc1 and loc2 can be relative or absolute paths or URLs. See the # section "Linking to external documentation" for more information about the use # of tag files. # Note: Each tag file must have a unique name (where the name does NOT include # the path). If a tag file is not located in the directory in which doxygen is # run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create a # tag file that is based on the input files it reads. See section "Linking to # external documentation" for more information about the usage of tag files. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES, all external class will be listed in # the class index. If set to NO, only the inherited external classes will be # listed. # The default value is: NO. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed # in the modules index. If set to NO, only the current project's groups will be # listed. # The default value is: YES. EXTERNAL_GROUPS = YES # If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in # the related pages index. If set to NO, only the current project's pages will # be listed. # The default value is: YES. 
EXTERNAL_PAGES = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of 'which perl'). # The default file (with absolute path) is: /usr/bin/perl. PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram # (in HTML and LaTeX) for classes with base or super classes. Setting the tag to # NO turns the diagrams off. Note that this option also works with HAVE_DOT # disabled, but it is recommended to install and use dot, since it yields more # powerful graphs. # The default value is: YES. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see: # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # You can include diagrams made with dia in doxygen documentation. Doxygen will # then run dia to produce the diagram and insert it in the documentation. The # DIA_PATH tag allows you to specify the directory where the dia binary resides. # If left empty dia is assumed to be found in the default search path. DIA_PATH = # If set to YES the inheritance and collaboration graphs will hide inheritance # and usage relations if the target is undocumented or is not a class. # The default value is: YES. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz (see: # http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent # Bell Labs. 
The other options in this section have no effect if this option is # set to NO # The default value is: NO. HAVE_DOT = NO # The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed # to run in parallel. When set to 0 doxygen will base this on the number of # processors available in the system. You can set it explicitly to a value # larger than 0 to get control over the balance between CPU load and processing # speed. # Minimum value: 0, maximum value: 32, default value: 0. # This tag requires that the tag HAVE_DOT is set to YES. DOT_NUM_THREADS = 0 # When you want a differently looking font in the dot files that doxygen # generates you can specify the font name using DOT_FONTNAME. You need to make # sure dot is able to find the font, which can be done by putting it in a # standard location or by setting the DOTFONTPATH environment variable or by # setting DOT_FONTPATH to the directory containing the font. # The default value is: Helvetica. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTNAME = Helvetica # The DOT_FONTSIZE tag can be used to set the size (in points) of the font of # dot graphs. # Minimum value: 4, maximum value: 24, default value: 10. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the default font as specified with # DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set # the path where dot can find it using this tag. # This tag requires that the tag HAVE_DOT is set to YES. DOT_FONTPATH = # If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for # each documented class showing the direct and indirect inheritance relations. # Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. 
CLASS_GRAPH = YES # If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a # graph for each documented class showing the direct and indirect implementation # dependencies (inheritance, containment, and class references variables) of the # class with other documented classes. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for # groups, showing the direct groups dependencies. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES, doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. UML_LOOK = NO # If the UML_LOOK tag is enabled, the fields and methods are shown inside the # class node. If there are many fields or methods and many nodes the graph may # become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the # number of items for each type to make the size more manageable. Set this to 0 # for no limit. Note that the threshold may be exceeded by 50% before the limit # is enforced. So when you set the threshold to 10, up to 15 fields may appear, # but if the number exceeds 15, the total amount of fields shown is limited to # 10. # Minimum value: 0, maximum value: 100, default value: 10. # This tag requires that the tag HAVE_DOT is set to YES. UML_LIMIT_NUM_FIELDS = 10 # If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and # collaboration graphs will show the relations between templates and their # instances. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. 
TEMPLATE_RELATIONS = NO # If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to # YES then doxygen will generate a graph for each documented file showing the # direct and indirect include dependencies of the file with other documented # files. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. INCLUDE_GRAPH = YES # If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are # set to YES then doxygen will generate a graph for each documented file showing # the direct and indirect include dependencies of the file with other documented # files. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH tag is set to YES then doxygen will generate a call # dependency graph for every global function or class method. # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. Disabling a call graph can be # accomplished by means of the command \hidecallgraph. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. CALL_GRAPH = NO # If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller # dependency graph for every global function or class method. # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable caller graphs for selected # functions only using the \callergraph command. Disabling a caller graph can be # accomplished by means of the command \hidecallergraph. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will show a graphical # hierarchy of all classes instead of a textual one. # The default value is: YES. 
# This tag requires that the tag HAVE_DOT is set to YES. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the # dependencies a directory has on other directories in a graphical way. The # dependency relations are determined by the #include relations between the # files in the directories. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. For an explanation of the image formats see the section # output formats in the documentation of the dot tool (Graphviz (see: # http://www.graphviz.org/)). # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order # to make the SVG files visible in IE 9+ (other browsers do not have this # requirement). # Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo, # png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and # png:gdiplus:gdiplus. # The default value is: png. # This tag requires that the tag HAVE_DOT is set to YES. DOT_IMAGE_FORMAT = png # If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to # enable generation of interactive SVG images that allow zooming and panning. # # Note that this requires a modern browser other than Internet Explorer. Tested # and working are Firefox, Chrome, Safari, and Opera. # Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make # the SVG files visible. Older versions of IE do not have SVG support. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. INTERACTIVE_SVG = NO # The DOT_PATH tag can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. # This tag requires that the tag HAVE_DOT is set to YES. 
DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the \dotfile # command). # This tag requires that the tag HAVE_DOT is set to YES. DOTFILE_DIRS = # The MSCFILE_DIRS tag can be used to specify one or more directories that # contain msc files that are included in the documentation (see the \mscfile # command). MSCFILE_DIRS = # The DIAFILE_DIRS tag can be used to specify one or more directories that # contain dia files that are included in the documentation (see the \diafile # command). DIAFILE_DIRS = # When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the # path where java can find the plantuml.jar file. If left blank, it is assumed # PlantUML is not used or called during a preprocessing step. Doxygen will # generate a warning when it encounters a \startuml command in this case and # will not generate output for the diagram. PLANTUML_JAR_PATH = # When using plantuml, the PLANTUML_CFG_FILE tag can be used to specify a # configuration file for plantuml. PLANTUML_CFG_FILE = # When using plantuml, the specified paths are searched for files specified by # the !include statement in a plantuml block. PLANTUML_INCLUDE_PATH = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes # that will be shown in the graph. If the number of nodes in a graph becomes # larger than this value, doxygen will truncate the graph, which is visualized # by representing a node as a red box. Note that doxygen if the number of direct # children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that # the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. # Minimum value: 0, maximum value: 10000, default value: 50. # This tag requires that the tag HAVE_DOT is set to YES. 
DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs # generated by dot. A depth value of 3 means that only nodes reachable from the # root by following a path via at most 3 edges will be shown. Nodes that lay # further from the root node will be omitted. Note that setting this option to 1 # or 2 may greatly reduce the computation time needed for large code bases. Also # note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. # Minimum value: 0, maximum value: 1000, default value: 0. # This tag requires that the tag HAVE_DOT is set to YES. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not seem # to support this out of the box. # # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) support # this, this feature is disabled by default. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page # explaining the meaning of the various boxes and arrows in the dot generated # graphs. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot # files that are used to generate the various graphs. # The default value is: YES. 
# This tag requires that the tag HAVE_DOT is set to YES. DOT_CLEANUP = YES ================================================ FILE: C/thirdparty/Simple-Web-Server/docs/benchmarks.md ================================================ # Benchmarks A simple benchmark of Simple-Web-Server and a few similar web libraries. Details: * Linux distribution: Debian Testing (2019-07-29) * Linux kernel: 4.19.0-1-amd64 * CPU: Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz * CPU cores: 4 * The HTTP load generator [httperf](https://github.com/httperf/httperf) is used to create the benchmark results, with the following arguments: ```sh httperf --server=localhost --port=3000 --uri=/ --num-conns=20000 --num-calls=200 ``` The response messages were made identical. ## Express [Express](https://expressjs.com/) is a popular Node.js web framework. Versions: * Node: v10.15.2 * Express: 4.17.1 Code: ```js const express = require('express'); const app = express(); app.get('/', (req, res) => { res.removeHeader('X-Powered-By'); res.removeHeader('Connection'); res.end('Hello World!') }); const port = 3000; app.listen(port, () => console.log(`Example app listening on port ${port}!`)); ``` Execution: ```sh NODE_ENV=production node index.js ``` Example results (13659.7 req/s): ```sh httperf --client=0/1 --server=localhost --port=3000 --uri=/ --send-buffer=4096 --recv-buffer=16384 --num-conns=20000 --num-calls=200 httperf: warning: open file limit > FD_SETSIZE; limiting max. 
# of open files to FD_SETSIZE Maximum connect burst length: 1 Total: connections 20000 requests 40000 replies 20000 test-duration 2.928 s Connection rate: 6829.9 conn/s (0.1 ms/conn, <=1 concurrent connections) Connection time [ms]: min 0.1 avg 0.1 max 14.8 median 0.5 stddev 0.1 Connection time [ms]: connect 0.0 Connection length [replies/conn]: 1.000 Request rate: 13659.7 req/s (0.1 ms/req) Request size [B]: 62.0 Reply rate [replies/s]: min 0.0 avg 0.0 max 0.0 stddev 0.0 (0 samples) Reply time [ms]: response 0.1 transfer 0.0 Reply size [B]: header 76.0 content 12.0 footer 0.0 (total 88.0) Reply status: 1xx=0 2xx=20000 3xx=0 4xx=0 5xx=0 CPU time [s]: user 0.66 system 2.27 (user 22.4% system 77.5% total 99.9%) Net I/O: 1414.0 KB/s (11.6*10^6 bps) Errors: total 20000 client-timo 0 socket-timo 0 connrefused 0 connreset 20000 Errors: fd-unavail 0 addrunavail 0 ftab-full 0 other 0 ``` ## Hyper [Hyper](https://hyper.rs/) is a Rust HTTP library that topped the [TechEmpower Web Framework Benchmarks results](https://www.techempower.com/benchmarks/#section=data-r18&hw=ph&test=plaintext) in 2019-07-09. Versions: * rustc: 1.38.0-nightly * hyper: 0.12 Code (copied from https://github.com/hyperium/hyper/blob/0.12.x/examples/hello.rs, but removed `pretty_env_logger` calls due to compilation issues): ```rust #![deny(warnings)] extern crate hyper; // extern crate pretty_env_logger; use hyper::{Body, Request, Response, Server}; use hyper::service::service_fn_ok; use hyper::rt::{self, Future}; fn main() { // pretty_env_logger::init(); let addr = ([127, 0, 0, 1], 3000).into(); let server = Server::bind(&addr) .serve(|| { // This is the `Service` that will handle the connection. // `service_fn_ok` is a helper to convert a function that // returns a Response into a `Service`. 
service_fn_ok(move |_: Request<Body>| { Response::new(Body::from("Hello World!")) }) }) .map_err(|e| eprintln!("server error: {}", e)); println!("Listening on http://{}", addr); rt::run(server); } ``` Compilation and run: ```sh cargo run --release ``` Example results (60712.3 req/s): ```sh httperf --client=0/1 --server=localhost --port=3000 --uri=/ --send-buffer=4096 --recv-buffer=16384 --num-conns=20000 --num-calls=200 httperf: warning: open file limit > FD_SETSIZE; limiting max. # of open files to FD_SETSIZE Maximum connect burst length: 1 Total: connections 20000 requests 4000000 replies 4000000 test-duration 65.884 s Connection rate: 303.6 conn/s (3.3 ms/conn, <=1 concurrent connections) Connection time [ms]: min 3.0 avg 3.3 max 11.3 median 3.5 stddev 0.3 Connection time [ms]: connect 0.0 Connection length [replies/conn]: 200.000 Request rate: 60712.3 req/s (0.0 ms/req) Request size [B]: 62.0 Reply rate [replies/s]: min 58704.0 avg 60732.7 max 62587.7 stddev 1021.7 (13 samples) Reply time [ms]: response 0.0 transfer 0.0 Reply size [B]: header 76.0 content 12.0 footer 0.0 (total 88.0) Reply status: 1xx=0 2xx=4000000 3xx=0 4xx=0 5xx=0 CPU time [s]: user 15.91 system 49.97 (user 24.1% system 75.8% total 100.0%) Net I/O: 8893.4 KB/s (72.9*10^6 bps) Errors: total 0 client-timo 0 socket-timo 0 connrefused 0 connreset 0 Errors: fd-unavail 0 addrunavail 0 ftab-full 0 other 0 ``` ## Simple-Web-Server In these simplistic tests, the performance of Simple-Web-Server is similar to the Hyper Rust HTTP library, although Hyper seems to be slightly faster more often than not. 
Versions: * g++: 9.1.0 Code (modified `http_examples.cpp`): ```c++ #include "server_http.hpp" using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; int main() { HttpServer server; server.config.port = 3000; server.default_resource["GET"] = [](std::shared_ptr<HttpServer::Response> response, std::shared_ptr<HttpServer::Request> /*request*/) { response->write("Hello World!", {{"Date", SimpleWeb::Date::to_string(std::chrono::system_clock::now())}}); }; server.start(); } ``` Build, compilation and run: ```sh mkdir build && cd build CXX=g++-9 CXXFLAGS="-O2 -DNDEBUG -flto" cmake .. make ./http_examples ``` Example results (60596.3 req/s): ```sh httperf --client=0/1 --server=localhost --port=3000 --uri=/ --send-buffer=4096 --recv-buffer=16384 --num-conns=20000 --num-calls=200 httperf: warning: open file limit > FD_SETSIZE; limiting max. # of open files to FD_SETSIZE Maximum connect burst length: 1 Total: connections 20000 requests 4000000 replies 4000000 test-duration 66.011 s Connection rate: 303.0 conn/s (3.3 ms/conn, <=1 concurrent connections) Connection time [ms]: min 3.2 avg 3.3 max 8.0 median 3.5 stddev 0.0 Connection time [ms]: connect 0.0 Connection length [replies/conn]: 200.000 Request rate: 60596.3 req/s (0.0 ms/req) Request size [B]: 62.0 Reply rate [replies/s]: min 60399.6 avg 60596.9 max 60803.8 stddev 130.9 (13 samples) Reply time [ms]: response 0.0 transfer 0.0 Reply size [B]: header 76.0 content 12.0 footer 0.0 (total 88.0) Reply status: 1xx=0 2xx=4000000 3xx=0 4xx=0 5xx=0 CPU time [s]: user 16.07 system 49.93 (user 24.3% system 75.6% total 100.0%) Net I/O: 8876.4 KB/s (72.7*10^6 bps) Errors: total 0 client-timo 0 socket-timo 0 connrefused 0 connreset 0 Errors: fd-unavail 0 addrunavail 0 ftab-full 0 other 0 ``` ================================================ FILE: C/thirdparty/Simple-Web-Server/http_examples.cpp ================================================ #include "client_http.hpp" #include "server_http.hpp" #include <future> // Added for the 
json-example #define BOOST_SPIRIT_THREADSAFE #include <boost/property_tree/json_parser.hpp> #include <boost/property_tree/ptree.hpp> // Added for the default_resource example #include <algorithm> #include <boost/filesystem.hpp> #include <fstream> #include <vector> #ifdef HAVE_OPENSSL #include "crypto.hpp" #endif using namespace std; // Added for the json-example: using namespace boost::property_tree; using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>; int main() { // HTTP-server at port 8080 using 1 thread // Unless you do more heavy non-threaded processing in the resources, // 1 thread is usually faster than several threads HttpServer server; server.config.port = 8080; // Add resources using path-regex and method-string, and an anonymous function // POST-example for the path /string, responds the posted string server.resource["^/string$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { // Retrieve string: auto content = request->content.string(); // request->content.string() is a convenience function for: // stringstream ss; // ss << request->content.rdbuf(); // auto content=ss.str(); *response << "HTTP/1.1 200 OK\r\nContent-Length: " << content.length() << "\r\n\r\n" << content; // Alternatively, use one of the convenience functions, for instance: // response->write(content); }; // POST-example for the path /json, responds firstName+" "+lastName from the posted json // Responds with an appropriate error message if the posted json is not valid, or if firstName or lastName is missing // Example posted json: // { // "firstName": "John", // "lastName": "Smith", // "age": 25 // } server.resource["^/json$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { ptree pt; read_json(request->content, pt); auto name = pt.get<string>("firstName") + " " + pt.get<string>("lastName"); *response << "HTTP/1.1 200 OK\r\n" 
<< "Content-Length: " << name.length() << "\r\n\r\n" << name; } catch(const exception &e) { *response << "HTTP/1.1 400 Bad Request\r\nContent-Length: " << strlen(e.what()) << "\r\n\r\n" << e.what(); } // Alternatively, using a convenience function: // try { // ptree pt; // read_json(request->content, pt); // auto name=pt.get<string>("firstName")+" "+pt.get<string>("lastName"); // response->write(name); // } // catch(const exception &e) { // response->write(SimpleWeb::StatusCode::client_error_bad_request, e.what()); // } }; // GET-example for the path /info // Responds with request-information server.resource["^/info$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { stringstream stream; stream << "<h1>Request from " << request->remote_endpoint().address().to_string() << ":" << request->remote_endpoint().port() << "</h1>"; stream << request->method << " " << request->path << " HTTP/" << request->http_version; stream << "<h2>Query Fields</h2>"; auto query_fields = request->parse_query_string(); for(auto &field : query_fields) stream << field.first << ": " << field.second << "<br>"; stream << "<h2>Header Fields</h2>"; for(auto &field : request->header) stream << field.first << ": " << field.second << "<br>"; response->write(stream); }; // GET-example for the path /match/[number], responds with the matched string in path (number) // For instance a request GET /match/123 will receive: 123 server.resource["^/match/([0-9]+)$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { response->write(request->path_match[1].str()); }; // GET-example simulating heavy work in a separate thread server.resource["^/work$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { thread work_thread([response] { this_thread::sleep_for(chrono::seconds(5)); response->write("Work done"); }); work_thread.detach(); }; // Default GET-example. 
If no other matches, this anonymous function will be called. // Will respond with content in the web/-directory, and its subdirectories. // Default file: index.html // Can for instance be used to retrieve an HTML 5 client that uses REST-resources on this server server.default_resource["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { try { auto web_root_path = boost::filesystem::canonical("web"); auto path = boost::filesystem::canonical(web_root_path / request->path); // Check if path is within web_root_path if(distance(web_root_path.begin(), web_root_path.end()) > distance(path.begin(), path.end()) || !equal(web_root_path.begin(), web_root_path.end(), path.begin())) throw invalid_argument("path must be within root path"); if(boost::filesystem::is_directory(path)) path /= "index.html"; SimpleWeb::CaseInsensitiveMultimap header; // Uncomment the following line to enable Cache-Control // header.emplace("Cache-Control", "max-age=86400"); #ifdef HAVE_OPENSSL // Uncomment the following lines to enable ETag // { // ifstream ifs(path.string(), ifstream::in | ios::binary); // if(ifs) { // auto hash = SimpleWeb::Crypto::to_hex_string(SimpleWeb::Crypto::md5(ifs)); // header.emplace("ETag", "\"" + hash + "\""); // auto it = request->header.find("If-None-Match"); // if(it != request->header.end()) { // if(!it->second.empty() && it->second.compare(1, hash.size(), hash) == 0) { // response->write(SimpleWeb::StatusCode::redirection_not_modified, header); // return; // } // } // } // else // throw invalid_argument("could not read file"); // } #endif auto ifs = make_shared<ifstream>(); ifs->open(path.string(), ifstream::in | ios::binary | ios::ate); if(*ifs) { auto length = ifs->tellg(); ifs->seekg(0, ios::beg); header.emplace("Content-Length", to_string(length)); response->write(header); // Trick to define a recursive function within this scope (for example purposes) class FileServer { public: static void read_and_send(const 
shared_ptr<HttpServer::Response> &response, const shared_ptr<ifstream> &ifs) { // Read and send 128 KB at a time static vector<char> buffer(131072); // Safe when server is running on one thread streamsize read_length; if((read_length = ifs->read(&buffer[0], static_cast<streamsize>(buffer.size())).gcount()) > 0) { response->write(&buffer[0], read_length); if(read_length == static_cast<streamsize>(buffer.size())) { response->send([response, ifs](const SimpleWeb::error_code &ec) { if(!ec) read_and_send(response, ifs); else cerr << "Connection interrupted" << endl; }); } } } }; FileServer::read_and_send(response, ifs); } else throw invalid_argument("could not read file"); } catch(const exception &e) { response->write(SimpleWeb::StatusCode::client_error_bad_request, "Could not open path " + request->path + ": " + e.what()); } }; server.on_error = [](shared_ptr<HttpServer::Request> /*request*/, const SimpleWeb::error_code & /*ec*/) { // Handle errors here // Note that connection timeouts will also call this handle with ec set to SimpleWeb::errc::operation_canceled }; // Start server and receive assigned port when server is listening for requests promise<unsigned short> server_port; thread server_thread([&server, &server_port]() { // Start server server.start([&server_port](unsigned short port) { server_port.set_value(port); }); }); cout << "Server listening on port " << server_port.get_future().get() << endl << endl; // Client examples string json_string = "{\"firstName\": \"John\",\"lastName\": \"Smith\",\"age\": 25}"; // Synchronous request examples { HttpClient client("localhost:8080"); try { cout << "Example GET request to http://localhost:8080/match/123" << endl; auto r1 = client.request("GET", "/match/123"); cout << "Response content: " << r1->content.rdbuf() << endl // Alternatively, use the convenience function r1->content.string() << endl; cout << "Example POST request to http://localhost:8080/string" << endl; auto r2 = client.request("POST", "/string", 
json_string); cout << "Response content: " << r2->content.rdbuf() << endl << endl; } catch(const SimpleWeb::system_error &e) { cerr << "Client request error: " << e.what() << endl; } } // Asynchronous request example { HttpClient client("localhost:8080"); cout << "Example POST request to http://localhost:8080/json" << endl; client.request("POST", "/json", json_string, [](shared_ptr<HttpClient::Response> response, const SimpleWeb::error_code &ec) { if(!ec) cout << "Response content: " << response->content.rdbuf() << endl; }); client.io_service->run(); } server_thread.join(); } ================================================ FILE: C/thirdparty/Simple-Web-Server/https_examples.cpp ================================================ #include "client_https.hpp" #include "server_https.hpp" #include <future> // Added for the json-example #define BOOST_SPIRIT_THREADSAFE #include <boost/property_tree/json_parser.hpp> #include <boost/property_tree/ptree.hpp> // Added for the default_resource example #include "crypto.hpp" #include <algorithm> #include <boost/filesystem.hpp> #include <fstream> #include <vector> using namespace std; // Added for the json-example: using namespace boost::property_tree; using HttpsServer = SimpleWeb::Server<SimpleWeb::HTTPS>; using HttpsClient = SimpleWeb::Client<SimpleWeb::HTTPS>; int main() { // HTTPS-server at port 8080 using 1 thread // Unless you do more heavy non-threaded processing in the resources, // 1 thread is usually faster than several threads HttpsServer server("server.crt", "server.key"); server.config.port = 8080; // Add resources using path-regex and method-string, and an anonymous function // POST-example for the path /string, responds the posted string server.resource["^/string$"]["POST"] = [](shared_ptr<HttpsServer::Response> response, shared_ptr<HttpsServer::Request> request) { // Retrieve string: auto content = request->content.string(); // request->content.string() is a convenience function for: // stringstream ss; // ss << 
request->content.rdbuf(); // auto content=ss.str(); *response << "HTTP/1.1 200 OK\r\nContent-Length: " << content.length() << "\r\n\r\n" << content; // Alternatively, use one of the convenience functions, for instance: // response->write(content); }; // POST-example for the path /json, responds firstName+" "+lastName from the posted json // Responds with an appropriate error message if the posted json is not valid, or if firstName or lastName is missing // Example posted json: // { // "firstName": "John", // "lastName": "Smith", // "age": 25 // } server.resource["^/json$"]["POST"] = [](shared_ptr<HttpsServer::Response> response, shared_ptr<HttpsServer::Request> request) { try { ptree pt; read_json(request->content, pt); auto name = pt.get<string>("firstName") + " " + pt.get<string>("lastName"); *response << "HTTP/1.1 200 OK\r\n" << "Content-Length: " << name.length() << "\r\n\r\n" << name; } catch(const exception &e) { *response << "HTTP/1.1 400 Bad Request\r\nContent-Length: " << strlen(e.what()) << "\r\n\r\n" << e.what(); } // Alternatively, using a convenience function: // try { // ptree pt; // read_json(request->content, pt); // auto name=pt.get<string>("firstName")+" "+pt.get<string>("lastName"); // response->write(name); // } // catch(const exception &e) { // response->write(SimpleWeb::StatusCode::client_error_bad_request, e.what()); // } }; // GET-example for the path /info // Responds with request-information server.resource["^/info$"]["GET"] = [](shared_ptr<HttpsServer::Response> response, shared_ptr<HttpsServer::Request> request) { stringstream stream; stream << "<h1>Request from " << request->remote_endpoint().address().to_string() << ":" << request->remote_endpoint().port() << "</h1>"; stream << request->method << " " << request->path << " HTTP/" << request->http_version; stream << "<h2>Query Fields</h2>"; auto query_fields = request->parse_query_string(); for(auto &field : query_fields) stream << field.first << ": " << field.second << "<br>"; stream << 
"<h2>Header Fields</h2>"; for(auto &field : request->header) stream << field.first << ": " << field.second << "<br>"; response->write(stream); }; // GET-example for the path /match/[number], responds with the matched string in path (number) // For instance a request GET /match/123 will receive: 123 server.resource["^/match/([0-9]+)$"]["GET"] = [](shared_ptr<HttpsServer::Response> response, shared_ptr<HttpsServer::Request> request) { response->write(request->path_match[1].str()); }; // GET-example simulating heavy work in a separate thread server.resource["^/work$"]["GET"] = [](shared_ptr<HttpsServer::Response> response, shared_ptr<HttpsServer::Request> /*request*/) { thread work_thread([response] { this_thread::sleep_for(chrono::seconds(5)); response->write("Work done"); }); work_thread.detach(); }; // Default GET-example. If no other matches, this anonymous function will be called. // Will respond with content in the web/-directory, and its subdirectories. // Default file: index.html // Can for instance be used to retrieve an HTML 5 client that uses REST-resources on this server server.default_resource["GET"] = [](shared_ptr<HttpsServer::Response> response, shared_ptr<HttpsServer::Request> request) { try { auto web_root_path = boost::filesystem::canonical("web"); auto path = boost::filesystem::canonical(web_root_path / request->path); // Check if path is within web_root_path if(distance(web_root_path.begin(), web_root_path.end()) > distance(path.begin(), path.end()) || !equal(web_root_path.begin(), web_root_path.end(), path.begin())) throw invalid_argument("path must be within root path"); if(boost::filesystem::is_directory(path)) path /= "index.html"; SimpleWeb::CaseInsensitiveMultimap header; // Uncomment the following line to enable Cache-Control // header.emplace("Cache-Control", "max-age=86400"); #ifdef HAVE_OPENSSL // Uncomment the following lines to enable ETag // { // ifstream ifs(path.string(), ifstream::in | ios::binary); // if(ifs) { // auto hash = 
SimpleWeb::Crypto::to_hex_string(SimpleWeb::Crypto::md5(ifs)); // header.emplace("ETag", "\"" + hash + "\""); // auto it = request->header.find("If-None-Match"); // if(it != request->header.end()) { // if(!it->second.empty() && it->second.compare(1, hash.size(), hash) == 0) { // response->write(SimpleWeb::StatusCode::redirection_not_modified, header); // return; // } // } // } // else // throw invalid_argument("could not read file"); // } #endif auto ifs = make_shared<ifstream>(); ifs->open(path.string(), ifstream::in | ios::binary | ios::ate); if(*ifs) { auto length = ifs->tellg(); ifs->seekg(0, ios::beg); header.emplace("Content-Length", to_string(length)); response->write(header); // Trick to define a recursive function within this scope (for example purposes) class FileServer { public: static void read_and_send(const shared_ptr<HttpsServer::Response> &response, const shared_ptr<ifstream> &ifs) { // Read and send 128 KB at a time static vector<char> buffer(131072); // Safe when server is running on one thread streamsize read_length; if((read_length = ifs->read(&buffer[0], static_cast<streamsize>(buffer.size())).gcount()) > 0) { response->write(&buffer[0], read_length); if(read_length == static_cast<streamsize>(buffer.size())) { response->send([response, ifs](const SimpleWeb::error_code &ec) { if(!ec) read_and_send(response, ifs); else cerr << "Connection interrupted" << endl; }); } } } }; FileServer::read_and_send(response, ifs); } else throw invalid_argument("could not read file"); } catch(const exception &e) { response->write(SimpleWeb::StatusCode::client_error_bad_request, "Could not open path " + request->path + ": " + e.what()); } }; server.on_error = [](shared_ptr<HttpsServer::Request> /*request*/, const SimpleWeb::error_code & /*ec*/) { // Handle errors here // Note that connection timeouts will also call this handle with ec set to SimpleWeb::errc::operation_canceled }; // Start server and receive assigned port when server is listening for requests 
promise<unsigned short> server_port; thread server_thread([&server, &server_port]() { // Start server server.start([&server_port](unsigned short port) { server_port.set_value(port); }); }); cout << "Server listening on port " << server_port.get_future().get() << endl << endl; // Client examples string json_string = "{\"firstName\": \"John\",\"lastName\": \"Smith\",\"age\": 25}"; // Synchronous request examples { HttpsClient client("localhost:8080", false); try { cout << "Example GET request to http://localhost:8080/match/123" << endl; auto r1 = client.request("GET", "/match/123"); cout << "Response content: " << r1->content.rdbuf() << endl // Alternatively, use the convenience function r1->content.string() << endl; cout << "Example POST request to http://localhost:8080/string" << endl; auto r2 = client.request("POST", "/string", json_string); cout << "Response content: " << r2->content.rdbuf() << endl << endl; } catch(const SimpleWeb::system_error &e) { cerr << "Client request error: " << e.what() << endl; } } // Asynchronous request example { HttpsClient client("localhost:8080", false); cout << "Example POST request to http://localhost:8080/json" << endl; client.request("POST", "/json", json_string, [](shared_ptr<HttpsClient::Response> response, const SimpleWeb::error_code &ec) { if(!ec) cout << "Response content: " << response->content.rdbuf() << endl; }); client.io_service->run(); } server_thread.join(); } ================================================ FILE: C/thirdparty/Simple-Web-Server/mutex.hpp ================================================ // Based on https://clang.llvm.org/docs/ThreadSafetyAnalysis.html #ifndef SIMPLE_WEB_MUTEX_HPP #define SIMPLE_WEB_MUTEX_HPP #include <mutex> // Enable thread safety attributes only with clang. 
#if defined(__clang__) && (!defined(SWIG)) #define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x)) #else #define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op #endif #define CAPABILITY(x) \ THREAD_ANNOTATION_ATTRIBUTE__(capability(x)) #define SCOPED_CAPABILITY \ THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable) #define GUARDED_BY(x) \ THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x)) #define PT_GUARDED_BY(x) \ THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x)) #define ACQUIRED_BEFORE(...) \ THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__)) #define ACQUIRED_AFTER(...) \ THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__)) #define REQUIRES(...) \ THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__)) #define REQUIRES_SHARED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__)) #define ACQUIRE(...) \ THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__)) #define ACQUIRE_SHARED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__)) #define RELEASE(...) \ THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__)) #define RELEASE_SHARED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__)) #define TRY_ACQUIRE(...) \ THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__)) #define TRY_ACQUIRE_SHARED(...) \ THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__)) #define EXCLUDES(...) \ THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__)) #define ASSERT_CAPABILITY(x) \ THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x)) #define ASSERT_SHARED_CAPABILITY(x) \ THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x)) #define RETURN_CAPABILITY(x) \ THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x)) #define NO_THREAD_SAFETY_ANALYSIS \ THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis) namespace SimpleWeb { /// Mutex class that is annotated for Clang Thread Safety Analysis. 
class CAPABILITY("mutex") Mutex { std::mutex mutex; public: void lock() ACQUIRE() { mutex.lock(); } void unlock() RELEASE() { mutex.unlock(); } }; /// Scoped mutex guard class that is annotated for Clang Thread Safety Analysis. class SCOPED_CAPABILITY LockGuard { Mutex &mutex; bool locked = true; public: LockGuard(Mutex &mutex_) ACQUIRE(mutex_) : mutex(mutex_) { mutex.lock(); } void unlock() RELEASE() { mutex.unlock(); locked = false; } ~LockGuard() RELEASE() { if(locked) mutex.unlock(); } }; } // namespace SimpleWeb #endif // SIMPLE_WEB_MUTEX_HPP ================================================ FILE: C/thirdparty/Simple-Web-Server/server_http.hpp ================================================ #ifndef SIMPLE_WEB_SERVER_HTTP_HPP #define SIMPLE_WEB_SERVER_HTTP_HPP #include "asio_compatibility.hpp" #include "mutex.hpp" #include "utility.hpp" #include <functional> #include <iostream> #include <limits> #include <list> #include <map> #include <sstream> #include <thread> #include <unordered_set> // Late 2017 TODO: remove the following checks and always use std::regex #ifdef USE_BOOST_REGEX #include <boost/regex.hpp> namespace SimpleWeb { namespace regex = boost; } #else #include <regex> namespace SimpleWeb { namespace regex = std; } #endif namespace SimpleWeb { template <class socket_type> class Server; template <class socket_type> class ServerBase { protected: class Connection; class Session; public: /// Response class where the content of the response is sent to client when the object is about to be destroyed. 
    class Response : public std::enable_shared_from_this<Response>, public std::ostream {
      friend class ServerBase<socket_type>;
      friend class Server<socket_type>;

      // Buffer backing the std::ostream interface; swapped out on each send().
      std::unique_ptr<asio::streambuf> streambuf = std::unique_ptr<asio::streambuf>(new asio::streambuf());

      std::shared_ptr<Session> session;
      long timeout_content;

      Mutex send_queue_mutex;
      // Queue of (buffer, completion-callback) pairs awaiting async_write.
      std::list<std::pair<std::shared_ptr<asio::streambuf>, std::function<void(const error_code &)>>> send_queue GUARDED_BY(send_queue_mutex);

      Response(std::shared_ptr<Session> session_, long timeout_content) noexcept : std::ostream(nullptr), session(std::move(session_)), timeout_content(timeout_content) {
        rdbuf(streambuf.get());
      }

      // Writes the given header fields followed by the blank line terminating
      // the header section. Emits "Content-Length: <size>" unless the caller
      // already supplied Content-Length or chunked Transfer-Encoding, or the
      // connection is set to close after the response.
      template <typename size_type>
      void write_header(const CaseInsensitiveMultimap &header, size_type size) {
        bool content_length_written = false;
        bool chunked_transfer_encoding = false;
        for(auto &field : header) {
          if(!content_length_written && case_insensitive_equal(field.first, "content-length"))
            content_length_written = true;
          else if(!chunked_transfer_encoding && case_insensitive_equal(field.first, "transfer-encoding") && case_insensitive_equal(field.second, "chunked"))
            chunked_transfer_encoding = true;

          *this << field.first << ": " << field.second << "\r\n";
        }
        if(!content_length_written && !chunked_transfer_encoding && !close_connection_after_response)
          *this << "Content-Length: " << size << "\r\n\r\n";
        else
          *this << "\r\n";
      }

      // Asynchronously writes the first buffer in send_queue. On success the
      // entry is removed and, if more entries remain, the next write is
      // started; completion callbacks are invoked after releasing the queue
      // lock. On error every queued callback is invoked with ec.
      void send_from_queue() REQUIRES(send_queue_mutex) {
        auto self = this->shared_from_this();
        asio::async_write(*self->session->connection->socket, *send_queue.begin()->first, [self](const error_code &ec, std::size_t /*bytes_transferred*/) {
          auto lock = self->session->connection->handler_runner->continue_lock();
          if(!lock)
            return;
          {
            // Note: this LockGuard intentionally shadows the ScopeRunner lock above.
            LockGuard lock(self->send_queue_mutex);
            if(!ec) {
              auto it = self->send_queue.begin();
              auto callback = std::move(it->second);
              self->send_queue.erase(it);
              if(self->send_queue.size() > 0)
                self->send_from_queue();

              lock.unlock();
              if(callback)
                callback(ec);
            }
            else {
              // All handlers in the queue is called with ec:
              std::vector<std::function<void(const error_code &)>> callbacks;
              for(auto &pair : self->send_queue) {
                if(pair.second)
                  callbacks.emplace_back(std::move(pair.second));
              }
              self->send_queue.clear();

              lock.unlock();
              for(auto &callback : callbacks)
                callback(ec);
            }
          }
        });
      }

      // Sends whatever remains in streambuf when the Response is destroyed.
      void send_on_delete(const std::function<void(const error_code &)> &callback = nullptr) noexcept {
        auto self = this->shared_from_this(); // Keep Response instance alive through the following async_write
        asio::async_write(*session->connection->socket, *streambuf, [self, callback](const error_code &ec, std::size_t /*bytes_transferred*/) {
          auto lock = self->session->connection->handler_runner->continue_lock();
          if(!lock)
            return;
          if(callback)
            callback(ec);
        });
      }

    public:
      std::size_t size() noexcept {
        return streambuf->size();
      }

      /// Send the content of the response stream to client. The callback is called when the send has completed.
      ///
      /// Use this function if you need to recursively send parts of a longer message, or when using server-sent events.
      void send(std::function<void(const error_code &)> callback = nullptr) noexcept {
        // Detach the current buffer into the queue and install a fresh one so
        // the caller can keep streaming while the write is in flight.
        std::shared_ptr<asio::streambuf> streambuf = std::move(this->streambuf);
        this->streambuf = std::unique_ptr<asio::streambuf>(new asio::streambuf());
        rdbuf(this->streambuf.get());

        LockGuard lock(send_queue_mutex);
        send_queue.emplace_back(std::move(streambuf), std::move(callback));
        // Only kick off a write if no write is already in progress.
        if(send_queue.size() == 1)
          send_from_queue();
      }

      /// Write directly to stream buffer using std::ostream::write.
      void write(const char_type *ptr, std::streamsize n) {
        std::ostream::write(ptr, n);
      }

      /// Convenience function for writing status line, potential header fields, and empty content.
      void write(StatusCode status_code = StatusCode::success_ok, const CaseInsensitiveMultimap &header = CaseInsensitiveMultimap()) {
        *this << "HTTP/1.1 " << SimpleWeb::status_code(status_code) << "\r\n";
        write_header(header, 0);
      }

      /// Convenience function for writing status line, header fields, and content.
      void write(StatusCode status_code, string_view content, const CaseInsensitiveMultimap &header = CaseInsensitiveMultimap()) {
        *this << "HTTP/1.1 " << SimpleWeb::status_code(status_code) << "\r\n";
        write_header(header, content.size());
        if(!content.empty())
          *this << content;
      }

      /// Convenience function for writing status line, header fields, and content.
      void write(StatusCode status_code, std::istream &content, const CaseInsensitiveMultimap &header = CaseInsensitiveMultimap()) {
        *this << "HTTP/1.1 " << SimpleWeb::status_code(status_code) << "\r\n";
        // Determine content size by seeking to the end, then rewind.
        content.seekg(0, std::ios::end);
        auto size = content.tellg();
        content.seekg(0, std::ios::beg);
        write_header(header, size);
        if(size)
          *this << content.rdbuf();
      }

      /// Convenience function for writing success status line, header fields, and content.
      void write(string_view content, const CaseInsensitiveMultimap &header = CaseInsensitiveMultimap()) {
        write(StatusCode::success_ok, content, header);
      }

      /// Convenience function for writing success status line, header fields, and content.
      void write(std::istream &content, const CaseInsensitiveMultimap &header = CaseInsensitiveMultimap()) {
        write(StatusCode::success_ok, content, header);
      }

      /// Convenience function for writing success status line, and header fields.
      void write(const CaseInsensitiveMultimap &header) {
        write(StatusCode::success_ok, std::string(), header);
      }

      /// If set to true, force server to close the connection after the response have been sent.
      ///
      /// This is useful when implementing a HTTP/1.0-server sending content
      /// without specifying the content length.
      bool close_connection_after_response = false;
    };

    /// Exposes the request body (backed by the request streambuf) as a std::istream.
    class Content : public std::istream {
      friend class ServerBase<socket_type>;

    public:
      std::size_t size() noexcept {
        return streambuf.size();
      }
      /// Convenience function to return content data without copy
      const std::uint8_t* data() noexcept {
        return asio::buffer_cast<const uint8_t *>(streambuf.data());
      }
      /// Convenience function to return content as std::string.
      std::string string() noexcept {
        return std::string(asio::buffers_begin(streambuf.data()), asio::buffers_end(streambuf.data()));
      }

    private:
      asio::streambuf &streambuf;
      Content(asio::streambuf &streambuf) noexcept : std::istream(&streambuf), streambuf(streambuf) {}
    };

    class Request {
      friend class ServerBase<socket_type>;
      friend class Server<socket_type>;
      friend class Session;

      asio::streambuf streambuf;
      // weak_ptr so a lingering Request does not keep the Connection alive.
      std::weak_ptr<Connection> connection;

      std::string optimization = std::to_string(0); // TODO: figure out what goes wrong in gcc optimization without this line

      Request(std::size_t max_request_streambuf_size, const std::shared_ptr<Connection> &connection_) noexcept : streambuf(max_request_streambuf_size), connection(connection_), content(streambuf) {}

    public:
      std::string method, path, query_string, http_version;

      Content content;

      CaseInsensitiveMultimap header;

      /// The result of the resource regular expression match of the request path.
      regex::smatch path_match;

      /// The time point when the request header was fully read.
      std::chrono::system_clock::time_point header_read_time;

      // Remote endpoint of the underlying socket; returns a default-constructed
      // endpoint if the connection is gone or the lookup throws.
      asio::ip::tcp::endpoint remote_endpoint() const noexcept {
        try {
          if(auto connection = this->connection.lock())
            return connection->socket->lowest_layer().remote_endpoint();
        }
        catch(...) {
        }
        return asio::ip::tcp::endpoint();
      }

      asio::ip::tcp::endpoint local_endpoint() const noexcept {
        try {
          if(auto connection = this->connection.lock())
            return connection->socket->lowest_layer().local_endpoint();
        }
        catch(...) {
        }
        return asio::ip::tcp::endpoint();
      }

      /// Deprecated, please use remote_endpoint().address().to_string() instead.
      DEPRECATED std::string remote_endpoint_address() const noexcept {
        try {
          if(auto connection = this->connection.lock())
            return connection->socket->lowest_layer().remote_endpoint().address().to_string();
        }
        catch(...) {
        }
        return std::string();
      }

      /// Deprecated, please use remote_endpoint().port() instead.
      DEPRECATED unsigned short remote_endpoint_port() const noexcept {
        try {
          if(auto connection = this->connection.lock())
            return connection->socket->lowest_layer().remote_endpoint().port();
        }
        catch(...) {
        }
        return 0;
      }

      /// Returns query keys with percent-decoded values.
      CaseInsensitiveMultimap parse_query_string() const noexcept {
        return SimpleWeb::QueryString::parse(query_string);
      }
    };

  protected:
    class Connection : public std::enable_shared_from_this<Connection> {
    public:
      template <typename... Args>
      Connection(std::shared_ptr<ScopeRunner> handler_runner_, Args &&... args) noexcept : handler_runner(std::move(handler_runner_)), socket(new socket_type(std::forward<Args>(args)...)) {}

      std::shared_ptr<ScopeRunner> handler_runner;

      std::unique_ptr<socket_type> socket; // Socket must be unique_ptr since asio::ssl::stream<asio::ip::tcp::socket> is not movable

      std::unique_ptr<asio::steady_timer> timer;

      // Shuts down and cancels the socket; errors are deliberately ignored.
      void close() noexcept {
        error_code ec;
        socket->lowest_layer().shutdown(asio::ip::tcp::socket::shutdown_both, ec);
        socket->lowest_layer().cancel(ec);
      }

      // (Re)arms the timeout timer; 0 seconds disables the timeout.
      // On expiry the connection is closed.
      void set_timeout(long seconds) noexcept {
        if(seconds == 0) {
          timer = nullptr;
          return;
        }

        timer = make_steady_timer(*socket, std::chrono::seconds(seconds));
        std::weak_ptr<Connection> self_weak(this->shared_from_this()); // To avoid keeping Connection instance alive longer than needed
        timer->async_wait([self_weak](const error_code &ec) {
          if(!ec) {
            if(auto self = self_weak.lock())
              self->close();
          }
        });
      }

      void cancel_timeout() noexcept {
        if(timer) {
          try {
            timer->cancel();
          }
          catch(...) {
          }
        }
      }
    };

    // Couples one Connection with one Request; a fresh Session is created per
    // request on keep-alive connections.
    class Session {
    public:
      Session(std::size_t max_request_streambuf_size, std::shared_ptr<Connection> connection_) noexcept : connection(std::move(connection_)), request(new Request(max_request_streambuf_size, connection)) {}

      std::shared_ptr<Connection> connection;
      std::shared_ptr<Request> request;
    };

  public:
    class Config {
      friend class ServerBase<socket_type>;

      Config(unsigned short port) noexcept : port(port) {}

    public:
      /// Port number to use. Defaults to 80 for HTTP and 443 for HTTPS. Set to 0 get an assigned port.
      unsigned short port;
      /// If io_service is not set, number of threads that the server will use when start() is called.
      /// Defaults to 1 thread.
      std::size_t thread_pool_size = 1;
      /// Timeout on request completion. Defaults to 5 seconds.
      long timeout_request = 5;
      /// Timeout on request/response content completion. Defaults to 300 seconds.
      long timeout_content = 300;
      /// Maximum size of request stream buffer. Defaults to architecture maximum.
      /// Reaching this limit will result in a message_size error code.
      std::size_t max_request_streambuf_size = (std::numeric_limits<std::size_t>::max)();
      /// IPv4 address in dotted decimal form or IPv6 address in hexadecimal notation.
      /// If empty, the address will be any address.
      std::string address;
      /// Set to false to avoid binding the socket to an address that is already in use. Defaults to true.
      bool reuse_address = true;
      /// Make use of RFC 7413 or TCP Fast Open (TFO)
      bool fast_open = false;
    };
    /// Set before calling start().
    Config config;

  private:
    // Makes a regex usable as a std::map key by ordering on the original
    // pattern string.
    class regex_orderable : public regex::regex {
    public:
      std::string str;

      regex_orderable(const char *regex_cstr) : regex::regex(regex_cstr), str(regex_cstr) {}
      regex_orderable(std::string regex_str_) : regex::regex(regex_str_), str(std::move(regex_str_)) {}
      bool operator<(const regex_orderable &rhs) const noexcept {
        return str < rhs.str;
      }
    };

  public:
    /// Use this container to add resources for specific request paths depending on the given regex and method.
/// Warning: do not add or remove resources after start() is called std::map<regex_orderable, std::map<std::string, std::function<void(std::shared_ptr<typename ServerBase<socket_type>::Response>, std::shared_ptr<typename ServerBase<socket_type>::Request>)>>> resource; /// If the request path does not match a resource regex, this function is called. std::map<std::string, std::function<void(std::shared_ptr<typename ServerBase<socket_type>::Response>, std::shared_ptr<typename ServerBase<socket_type>::Request>)>> default_resource; /// Called when an error occurs. std::function<void(std::shared_ptr<typename ServerBase<socket_type>::Request>, const error_code &)> on_error; /// Called on upgrade requests. std::function<void(std::unique_ptr<socket_type> &, std::shared_ptr<typename ServerBase<socket_type>::Request>)> on_upgrade; /// If you want to reuse an already created asio::io_service, store its pointer here before calling start(). std::shared_ptr<io_context> io_service; /// Start the server. /// If io_service is not set, an internal io_service is created instead. /// The callback argument is called after the server is accepting connections, /// where its parameter contains the assigned port. 
    void start(const std::function<void(unsigned short /*port*/)> &callback = nullptr) {
      std::unique_lock<std::mutex> lock(start_stop_mutex);

      asio::ip::tcp::endpoint endpoint;
      if(!config.address.empty())
        endpoint = asio::ip::tcp::endpoint(make_address(config.address), config.port);
      else
        endpoint = asio::ip::tcp::endpoint(asio::ip::tcp::v6(), config.port);

      if(!io_service) {
        io_service = std::make_shared<io_context>();
        internal_io_service = true;
      }

      if(!acceptor)
        acceptor = std::unique_ptr<asio::ip::tcp::acceptor>(new asio::ip::tcp::acceptor(*io_service));
      try {
        acceptor->open(endpoint.protocol());
      }
      catch(const system_error &error) {
        // Fall back to IPv4 when IPv6 is unsupported and no address was configured.
        if(error.code() == asio::error::address_family_not_supported && config.address.empty()) {
          endpoint = asio::ip::tcp::endpoint(asio::ip::tcp::v4(), config.port);
          acceptor->open(endpoint.protocol());
        }
        else
          throw;
      }
      acceptor->set_option(asio::socket_base::reuse_address(config.reuse_address));
      if(config.fast_open) {
#if defined(__linux__) && defined(TCP_FASTOPEN)
        const int qlen = 5; // This seems to be the value that is used in other examples.
        error_code ec;
        acceptor->set_option(asio::detail::socket_option::integer<IPPROTO_TCP, TCP_FASTOPEN>(qlen), ec);
#endif // End Linux
      }
      acceptor->bind(endpoint);

      after_bind();

      // Read back the actual port (config.port may have been 0 for auto-assign).
      auto port = acceptor->local_endpoint().port();

      acceptor->listen();
      accept();

      if(internal_io_service && io_service->stopped())
        restart(*io_service);

      if(callback)
        post(*io_service, [callback, port] {
          callback(port);
        });

      if(internal_io_service) {
        // If thread_pool_size>1, start m_io_service.run() in (thread_pool_size-1) threads for thread-pooling
        threads.clear();
        for(std::size_t c = 1; c < config.thread_pool_size; c++) {
          threads.emplace_back([this]() {
            this->io_service->run();
          });
        }

        // Release the lock while this thread blocks in run(), so stop() can proceed.
        lock.unlock();

        // Main thread
        if(config.thread_pool_size > 0)
          io_service->run();

        lock.lock();

        // Wait for the rest of the threads, if any, to finish as well
        for(auto &t : threads)
          t.join();
      }
    }

    // MR - added method to return the port we are listening on
    // Returns 0 if the acceptor has not been created (start() not called yet).
    // NOTE(review): uses boost::asio directly rather than the file's asio
    // namespace alias — ties this helper to Boost even when USE_STANDALONE_ASIO
    // is defined; confirm this is intended.
    unsigned short getLocalPort() {
      if (acceptor) {
        boost::asio::ip::tcp::endpoint endpoint = acceptor->local_endpoint();
        return endpoint.port();
      }
      else {
        return 0;
      }
    }

    /// Stop accepting new requests, and close current connections.
    void stop() noexcept {
      std::lock_guard<std::mutex> lock(start_stop_mutex);

      if(acceptor) {
        error_code ec;
        acceptor->close(ec);

        // Close and forget every open connection.
        {
          LockGuard lock(connections->mutex);
          for(auto &connection : connections->set)
            connection->close();
          connections->set.clear();
        }

        if(internal_io_service)
          io_service->stop();
      }
    }

    virtual ~ServerBase() noexcept {
      handler_runner->stop();
      stop();
    }

  protected:
    std::mutex start_stop_mutex;

    bool internal_io_service = false;

    std::unique_ptr<asio::ip::tcp::acceptor> acceptor;
    std::vector<std::thread> threads;

    // Set of all open connections, guarded by its mutex so stop() can close
    // them from another thread.
    struct Connections {
      Mutex mutex;
      std::unordered_set<Connection *> set GUARDED_BY(mutex);
    };
    std::shared_ptr<Connections> connections;

    std::shared_ptr<ScopeRunner> handler_runner;

    ServerBase(unsigned short port) noexcept : config(port), connections(new Connections()), handler_runner(new ScopeRunner()) {}

    virtual void after_bind() {}
    virtual void accept() = 0;

    // Creates a Connection whose custom deleter removes it from the
    // connections set before destruction.
    template <typename... Args>
    std::shared_ptr<Connection> create_connection(Args &&... args) noexcept {
      auto connections = this->connections;
      auto connection = std::shared_ptr<Connection>(new Connection(handler_runner, std::forward<Args>(args)...), [connections](Connection *connection) {
        {
          LockGuard lock(connections->mutex);
          auto it = connections->set.find(connection);
          if(it != connections->set.end())
            connections->set.erase(it);
        }
        delete connection;
      });
      {
        LockGuard lock(connections->mutex);
        connections->set.emplace(connection.get());
      }
      return connection;
    }

    // Reads the request header and, when present, the request content
    // (Content-Length or chunked), then dispatches to find_resource().
    void read(const std::shared_ptr<Session> &session) {
      session->connection->set_timeout(config.timeout_request);
      asio::async_read_until(*session->connection->socket, session->request->streambuf, "\r\n\r\n", [this, session](const error_code &ec, std::size_t bytes_transferred) {
        session->connection->set_timeout(config.timeout_content);
        auto lock = session->connection->handler_runner->continue_lock();
        if(!lock)
          return;
        session->request->header_read_time = std::chrono::system_clock::now();

        if(!ec) {
          // request->streambuf.size() is not necessarily the same as bytes_transferred, from Boost-docs:
          // "After a successful async_read_until operation, the streambuf may contain additional data beyond the delimiter"
          // The chosen solution is to extract lines from the stream directly when parsing the header. What is left of the
          // streambuf (maybe some bytes of the content) is appended to in the async_read-function below (for retrieving content).
          std::size_t num_additional_bytes = session->request->streambuf.size() - bytes_transferred;

          if(!RequestMessage::parse(session->request->content, session->request->method, session->request->path, session->request->query_string, session->request->http_version, session->request->header)) {
            if(this->on_error)
              this->on_error(session->request, make_error_code::make_error_code(errc::protocol_error));
            return;
          }

          // If content, read that as well
          auto header_it = session->request->header.find("Content-Length");
          if(header_it != session->request->header.end()) {
            unsigned long long content_length = 0;
            try {
              content_length = std::stoull(header_it->second);
            }
            catch(const std::exception &) {
              if(this->on_error)
                this->on_error(session->request, make_error_code::make_error_code(errc::protocol_error));
              return;
            }
            if(content_length > session->request->streambuf.max_size()) {
              // Body would overflow the configured buffer limit: reply 413.
              auto response = std::shared_ptr<Response>(new Response(session, this->config.timeout_content));
              response->write(StatusCode::client_error_payload_too_large);
              if(this->on_error)
                this->on_error(session->request, make_error_code::make_error_code(errc::message_size));
              return;
            }
            if(content_length > num_additional_bytes) {
              asio::async_read(*session->connection->socket, session->request->streambuf, asio::transfer_exactly(content_length - num_additional_bytes), [this, session](const error_code &ec, std::size_t /*bytes_transferred*/) {
                auto lock = session->connection->handler_runner->continue_lock();
                if(!lock)
                  return;

                if(!ec)
                  this->find_resource(session);
                else if(this->on_error)
                  this->on_error(session->request, ec);
              });
            }
            else
              this->find_resource(session);
          }
          else if((header_it = session->request->header.find("Transfer-Encoding")) != session->request->header.end() && header_it->second == "chunked") {
            // Expect hex number to not exceed 16 bytes (64-bit number), but take into account previous additional read bytes
            auto chunk_size_streambuf = std::make_shared<asio::streambuf>(std::max<std::size_t>(16 + 2, session->request->streambuf.size()));

            // Move leftover bytes
            auto &source = session->request->streambuf;
            auto &target = *chunk_size_streambuf;
            target.commit(asio::buffer_copy(target.prepare(source.size()), source.data()));
            source.consume(source.size());

            this->read_chunked_transfer_encoded(session, chunk_size_streambuf);
          }
          else
            this->find_resource(session);
        }
        else if(this->on_error)
          this->on_error(session->request, ec);
      });
    }

    // Reads one chunk of a chunked transfer-encoded body, then recurses for
    // the next chunk until a zero-size chunk terminates the body.
    void read_chunked_transfer_encoded(const std::shared_ptr<Session> &session, const std::shared_ptr<asio::streambuf> &chunk_size_streambuf) {
      asio::async_read_until(*session->connection->socket, *chunk_size_streambuf, "\r\n", [this, session, chunk_size_streambuf](const error_code &ec, size_t bytes_transferred) {
        auto lock = session->connection->handler_runner->continue_lock();
        if(!lock)
          return;

        if(!ec) {
          std::istream istream(chunk_size_streambuf.get());
          std::string line;
          std::getline(istream, line);
          bytes_transferred -= line.size() + 1;
          unsigned long chunk_size = 0;
          try {
            // Chunk size is a hexadecimal number.
            chunk_size = std::stoul(line, 0, 16);
          }
          catch(...)
          {
            if(this->on_error)
              this->on_error(session->request, make_error_code::make_error_code(errc::protocol_error));
            return;
          }

          // Zero-size chunk terminates the body.
          if(chunk_size == 0) {
            this->find_resource(session);
            return;
          }

          if(chunk_size + session->request->streambuf.size() > session->request->streambuf.max_size()) {
            auto response = std::shared_ptr<Response>(new Response(session, this->config.timeout_content));
            response->write(StatusCode::client_error_payload_too_large);
            if(this->on_error)
              this->on_error(session->request, make_error_code::make_error_code(errc::message_size));
            return;
          }

          auto num_additional_bytes = chunk_size_streambuf->size() - bytes_transferred;

          auto bytes_to_move = std::min<std::size_t>(chunk_size, num_additional_bytes);
          if(bytes_to_move > 0) {
            // Move leftover bytes
            auto &source = *chunk_size_streambuf;
            auto &target = session->request->streambuf;
            target.commit(asio::buffer_copy(target.prepare(bytes_to_move), source.data(), bytes_to_move));
            source.consume(bytes_to_move);
          }

          if(chunk_size > num_additional_bytes) {
            asio::async_read(*session->connection->socket, session->request->streambuf, asio::transfer_exactly(chunk_size - num_additional_bytes), [this, session, chunk_size_streambuf](const error_code &ec, size_t /*bytes_transferred*/) {
              auto lock = session->connection->handler_runner->continue_lock();
              if(!lock)
                return;

              if(!ec) {
                // Remove "\r\n"
                auto null_buffer = std::make_shared<asio::streambuf>(2);
                asio::async_read(*session->connection->socket, *null_buffer, asio::transfer_exactly(2), [this, session, chunk_size_streambuf, null_buffer](const error_code &ec, size_t /*bytes_transferred*/) {
                  auto lock = session->connection->handler_runner->continue_lock();
                  if(!lock)
                    return;
                  if(!ec)
                    read_chunked_transfer_encoded(session, chunk_size_streambuf);
                  else
                    this->on_error(session->request, ec);
                });
              }
              else if(this->on_error)
                this->on_error(session->request, ec);
            });
          }
          else if(2 + chunk_size > num_additional_bytes) { // If only end of chunk remains unread (\n or \r\n)
            // Remove "\r\n"
            if(2 + chunk_size - num_additional_bytes == 1)
              istream.get();
            auto null_buffer = std::make_shared<asio::streambuf>(2);
            asio::async_read(*session->connection->socket, *null_buffer, asio::transfer_exactly(2 + chunk_size - num_additional_bytes), [this, session, chunk_size_streambuf, null_buffer](const error_code &ec, size_t /*bytes_transferred*/) {
              auto lock = session->connection->handler_runner->continue_lock();
              if(!lock)
                return;
              if(!ec)
                read_chunked_transfer_encoded(session, chunk_size_streambuf);
              else
                this->on_error(session->request, ec);
            });
          }
          else {
            // Remove "\r\n"
            istream.get();
            istream.get();

            read_chunked_transfer_encoded(session, chunk_size_streambuf);
          }
        }
        else if(this->on_error)
          this->on_error(session->request, ec);
      });
    }

    // Matches request path and method against the resource map (falling back
    // to default_resource); handles Upgrade requests through on_upgrade first.
    void find_resource(const std::shared_ptr<Session> &session) {
      // Upgrade connection
      if(on_upgrade) {
        auto it = session->request->header.find("Upgrade");
        if(it != session->request->header.end()) {
          // remove connection from connections
          {
            LockGuard lock(connections->mutex);
            auto it = connections->set.find(session->connection.get());
            if(it != connections->set.end())
              connections->set.erase(it);
          }

          on_upgrade(session->connection->socket, session->request);
          return;
        }
      }
      // Find path- and method-match, and call write
      for(auto &regex_method : resource) {
        auto it = regex_method.second.find(session->request->method);
        if(it != regex_method.second.end()) {
          regex::smatch sm_res;
          if(regex::regex_match(session->request->path, sm_res, regex_method.first)) {
            session->request->path_match = std::move(sm_res);
            write(session, it->second);
            return;
          }
        }
      }
      auto it = default_resource.find(session->request->method);
      if(it != default_resource.end())
        write(session, it->second);
    }

    // Creates the Response whose deleter sends the response content and, for
    // keep-alive connections, starts reading the next request; then invokes
    // the matched resource function.
    void write(const std::shared_ptr<Session> &session, std::function<void(std::shared_ptr<typename ServerBase<socket_type>::Response>, std::shared_ptr<typename ServerBase<socket_type>::Request>)> &resource_function) {
      auto response = std::shared_ptr<Response>(new Response(session, config.timeout_content), [this](Response *response_ptr) {
        auto response = std::shared_ptr<Response>(response_ptr);
        response->send_on_delete([this, response](const error_code &ec) {
          response->session->connection->cancel_timeout();
          if(!ec) {
            if(response->close_connection_after_response)
              return;

            // Honor explicit Connection: close / keep-alive headers.
            auto range = response->session->request->header.equal_range("Connection");
            for(auto it = range.first; it != range.second; it++) {
              if(case_insensitive_equal(it->second, "close"))
                return;
              else if(case_insensitive_equal(it->second, "keep-alive")) {
                auto new_session = std::make_shared<Session>(this->config.max_request_streambuf_size, response->session->connection);
                this->read(new_session);
                return;
              }
            }
            // HTTP/1.1 defaults to persistent connections.
            if(response->session->request->http_version >= "1.1") {
              auto new_session = std::make_shared<Session>(this->config.max_request_streambuf_size, response->session->connection);
              this->read(new_session);
              return;
            }
          }
          else if(this->on_error)
            this->on_error(response->session->request, ec);
        });
      });

      try {
        resource_function(response, session->request);
      }
      catch(const std::exception &) {
        if(on_error)
          on_error(session->request, make_error_code::make_error_code(errc::operation_canceled));
        return;
      }
    }
  };

  template <class socket_type>
  class Server : public ServerBase<socket_type> {};

  using HTTP = asio::ip::tcp::socket;

  template <>
  class Server<HTTP> : public ServerBase<HTTP> {
  public:
    /// Constructs a server object.
    Server() noexcept : ServerBase<HTTP>::ServerBase(80) {}

  protected:
    // Accepts a connection, immediately re-arms the acceptor, and starts
    // reading the request on the new socket.
    void accept() override {
      auto connection = create_connection(*io_service);

      acceptor->async_accept(*connection->socket, [this, connection](const error_code &ec) {
        auto lock = connection->handler_runner->continue_lock();
        if(!lock)
          return;

        // Immediately start accepting a new connection (unless io_service has been stopped)
        if(ec != error::operation_aborted)
          this->accept();

        auto session = std::make_shared<Session>(config.max_request_streambuf_size, connection);

        if(!ec) {
          asio::ip::tcp::no_delay option(true);
          // Local ec: failure to set TCP_NODELAY is deliberately ignored.
          error_code ec;
          session->connection->socket->set_option(option, ec);

          this->read(session);
        }
        else if(this->on_error)
          this->on_error(session->request, ec);
      });
    }
  };
} // namespace SimpleWeb

#endif /* SIMPLE_WEB_SERVER_HTTP_HPP */


================================================
FILE: C/thirdparty/Simple-Web-Server/server_https.hpp
================================================
#ifndef SIMPLE_WEB_SERVER_HTTPS_HPP
#define SIMPLE_WEB_SERVER_HTTPS_HPP

#include "server_http.hpp"

#ifdef USE_STANDALONE_ASIO
#include <asio/ssl.hpp>
#else
#include <boost/asio/ssl.hpp>
#endif

#include <algorithm>
#include <openssl/ssl.h>

namespace SimpleWeb {
  using HTTPS = asio::ssl::stream<asio::ip::tcp::socket>;

  template <>
  class Server<HTTPS> : public ServerBase<HTTPS> {
    // Set when client verification is enabled; triggers session-id-context
    // setup in after_bind().
    bool set_session_id_context = false;

  public:
    /**
     * Constructs a server object.
     *
     * @param certification_file If non-empty, sends the given certification file to client.
     * @param private_key_file Specifies the file containing the private key for certification_file.
     * @param verify_file If non-empty, use this certificate authority file to perform verification of client's certificate and hostname according to RFC 2818.
     */
    Server(const std::string &certification_file, const std::string &private_key_file, const std::string &verify_file = std::string())
        : ServerBase<HTTPS>::ServerBase(443), context(asio::ssl::context::tlsv12) {
      context.use_certificate_chain_file(certification_file);
      context.use_private_key_file(private_key_file, asio::ssl::context::pem);

      if(verify_file.size() > 0) {
        context.load_verify_file(verify_file);
        context.set_verify_mode(asio::ssl::verify_peer | asio::ssl::verify_fail_if_no_peer_cert | asio::ssl::verify_client_once);
        set_session_id_context = true;
      }
    }

  protected:
    asio::ssl::context context;

    void after_bind() override {
      if(set_session_id_context) {
        // Creating session_id_context from address:port but reversed due to small SSL_MAX_SSL_SESSION_ID_LENGTH
        auto session_id_context = std::to_string(acceptor->local_endpoint().port()) + ':';
        session_id_context.append(config.address.rbegin(), config.address.rend());
        SSL_CTX_set_session_id_context(context.native_handle(), reinterpret_cast<const unsigned char *>(session_id_context.data()), static_cast<unsigned int>(std::min<std::size_t>(session_id_context.size(), SSL_MAX_SSL_SESSION_ID_LENGTH)));
      }
    }

    // Accepts a TCP connection, then performs the TLS handshake (bounded by
    // timeout_request) before reading the request.
    void accept() override {
      auto connection = create_connection(*io_service, context);

      acceptor->async_accept(connection->socket->lowest_layer(), [this, connection](const error_code &ec) {
        auto lock = connection->handler_runner->continue_lock();
        if(!lock)
          return;

        if(ec != error::operation_aborted)
          this->accept();

        auto session = std::make_shared<Session>(config.max_request_streambuf_size, connection);

        if(!ec) {
          asio::ip::tcp::no_delay option(true);
          // Local ec: failure to set TCP_NODELAY is deliberately ignored.
          error_code ec;
          session->connection->socket->lowest_layer().set_option(option, ec);

          session->connection->set_timeout(config.timeout_request);
          session->connection->socket->async_handshake(asio::ssl::stream_base::server, [this, session](const error_code &ec) {
            session->connection->cancel_timeout();
            auto lock = session->connection->handler_runner->continue_lock();
            if(!lock)
              return;
            if(!ec)
              this->read(session);
            else if(this->on_error)
              this->on_error(session->request, ec);
          });
        }
        else if(this->on_error)
          this->on_error(session->request, ec);
      });
    }
  };
} // namespace SimpleWeb

#endif /* SIMPLE_WEB_SERVER_HTTPS_HPP */


================================================
FILE: C/thirdparty/Simple-Web-Server/status_code.hpp
================================================
#ifndef SIMPLE_WEB_STATUS_CODE_HPP
#define SIMPLE_WEB_STATUS_CODE_HPP

#include <cstdlib>
#include <map>
#include <string>
#include <unordered_map>
#include <vector>

namespace SimpleWeb {
  // HTTP status codes; enumerators without an explicit value continue
  // sequentially from the previous explicit value.
  enum class StatusCode {
    unknown = 0,
    information_continue = 100,
    information_switching_protocols,
    information_processing,
    success_ok = 200,
    success_created,
    success_accepted,
    success_non_authoritative_information,
    success_no_content,
    success_reset_content,
    success_partial_content,
    success_multi_status,
    success_already_reported,
    success_im_used = 226,
    redirection_multiple_choices = 300,
    redirection_moved_permanently,
    redirection_found,
    redirection_see_other,
    redirection_not_modified,
    redirection_use_proxy,
    redirection_switch_proxy,
    redirection_temporary_redirect,
    redirection_permanent_redirect,
    client_error_bad_request = 400,
    client_error_unauthorized,
    client_error_payment_required,
    client_error_forbidden,
    client_error_not_found,
    client_error_method_not_allowed,
    client_error_not_acceptable,
    client_error_proxy_authentication_required,
    client_error_request_timeout,
    client_error_conflict,
    client_error_gone,
    client_error_length_required,
    client_error_precondition_failed,
    client_error_payload_too_large,
    client_error_uri_too_long,
    client_error_unsupported_media_type,
    client_error_range_not_satisfiable,
    client_error_expectation_failed,
    client_error_im_a_teapot,
    client_error_misdirection_required = 421,
    client_error_unprocessable_entity,
    client_error_locked,
    client_error_failed_dependency,
    client_error_upgrade_required = 426,
    client_error_precondition_required = 428,
    client_error_too_many_requests,
client_error_request_header_fields_too_large = 431, client_error_unavailable_for_legal_reasons = 451, server_error_internal_server_error = 500, server_error_not_implemented, server_error_bad_gateway, server_error_service_unavailable, server_error_gateway_timeout, server_error_http_version_not_supported, server_error_variant_also_negotiates, server_error_insufficient_storage, server_error_loop_detected, server_error_not_extended = 510, server_error_network_authentication_required }; inline const std::map<StatusCode, std::string> &status_code_strings() { static const std::map<StatusCode, std::string> status_code_strings = { {StatusCode::unknown, ""}, {StatusCode::information_continue, "100 Continue"}, {StatusCode::information_switching_protocols, "101 Switching Protocols"}, {StatusCode::information_processing, "102 Processing"}, {StatusCode::success_ok, "200 OK"}, {StatusCode::success_created, "201 Created"}, {StatusCode::success_accepted, "202 Accepted"}, {StatusCode::success_non_authoritative_information, "203 Non-Authoritative Information"}, {StatusCode::success_no_content, "204 No Content"}, {StatusCode::success_reset_content, "205 Reset Content"}, {StatusCode::success_partial_content, "206 Partial Content"}, {StatusCode::success_multi_status, "207 Multi-Status"}, {StatusCode::success_already_reported, "208 Already Reported"}, {StatusCode::success_im_used, "226 IM Used"}, {StatusCode::redirection_multiple_choices, "300 Multiple Choices"}, {StatusCode::redirection_moved_permanently, "301 Moved Permanently"}, {StatusCode::redirection_found, "302 Found"}, {StatusCode::redirection_see_other, "303 See Other"}, {StatusCode::redirection_not_modified, "304 Not Modified"}, {StatusCode::redirection_use_proxy, "305 Use Proxy"}, {StatusCode::redirection_switch_proxy, "306 Switch Proxy"}, {StatusCode::redirection_temporary_redirect, "307 Temporary Redirect"}, {StatusCode::redirection_permanent_redirect, "308 Permanent Redirect"}, {StatusCode::client_error_bad_request, "400 
Bad Request"}, {StatusCode::client_error_unauthorized, "401 Unauthorized"}, {StatusCode::client_error_payment_required, "402 Payment Required"}, {StatusCode::client_error_forbidden, "403 Forbidden"}, {StatusCode::client_error_not_found, "404 Not Found"}, {StatusCode::client_error_method_not_allowed, "405 Method Not Allowed"}, {StatusCode::client_error_not_acceptable, "406 Not Acceptable"}, {StatusCode::client_error_proxy_authentication_required, "407 Proxy Authentication Required"}, {StatusCode::client_error_request_timeout, "408 Request Timeout"}, {StatusCode::client_error_conflict, "409 Conflict"}, {StatusCode::client_error_gone, "410 Gone"}, {StatusCode::client_error_length_required, "411 Length Required"}, {StatusCode::client_error_precondition_failed, "412 Precondition Failed"}, {StatusCode::client_error_payload_too_large, "413 Payload Too Large"}, {StatusCode::client_error_uri_too_long, "414 URI Too Long"}, {StatusCode::client_error_unsupported_media_type, "415 Unsupported Media Type"}, {StatusCode::client_error_range_not_satisfiable, "416 Range Not Satisfiable"}, {StatusCode::client_error_expectation_failed, "417 Expectation Failed"}, {StatusCode::client_error_im_a_teapot, "418 I'm a teapot"}, {StatusCode::client_error_misdirection_required, "421 Misdirected Request"}, {StatusCode::client_error_unprocessable_entity, "422 Unprocessable Entity"}, {StatusCode::client_error_locked, "423 Locked"}, {StatusCode::client_error_failed_dependency, "424 Failed Dependency"}, {StatusCode::client_error_upgrade_required, "426 Upgrade Required"}, {StatusCode::client_error_precondition_required, "428 Precondition Required"}, {StatusCode::client_error_too_many_requests, "429 Too Many Requests"}, {StatusCode::client_error_request_header_fields_too_large, "431 Request Header Fields Too Large"}, {StatusCode::client_error_unavailable_for_legal_reasons, "451 Unavailable For Legal Reasons"}, {StatusCode::server_error_internal_server_error, "500 Internal Server Error"}, 
{StatusCode::server_error_not_implemented, "501 Not Implemented"}, {StatusCode::server_error_bad_gateway, "502 Bad Gateway"}, {StatusCode::server_error_service_unavailable, "503 Service Unavailable"}, {StatusCode::server_error_gateway_timeout, "504 Gateway Timeout"}, {StatusCode::server_error_http_version_not_supported, "505 HTTP Version Not Supported"}, {StatusCode::server_error_variant_also_negotiates, "506 Variant Also Negotiates"}, {StatusCode::server_error_insufficient_storage, "507 Insufficient Storage"}, {StatusCode::server_error_loop_detected, "508 Loop Detected"}, {StatusCode::server_error_not_extended, "510 Not Extended"}, {StatusCode::server_error_network_authentication_required, "511 Network Authentication Required"}}; return status_code_strings; } /* Converts a status line such as "404 Not Found" (or just "404") to the StatusCode enum. Returns StatusCode::unknown when the string has fewer than 3 characters or its first 3 characters are not all decimal digits. Numeric codes that have no entry in status_code_strings() are cast directly from their integer value. */ inline StatusCode status_code(const std::string &status_code_string) noexcept { if(status_code_string.size() < 3) return StatusCode::unknown; auto number = status_code_string.substr(0, 3); if(number[0] < '0' || number[0] > '9' || number[1] < '0' || number[1] > '9' || number[2] < '0' || number[2] > '9') return StatusCode::unknown; class StringToStatusCode : public std::unordered_map<std::string, SimpleWeb::StatusCode> { public: StringToStatusCode() { for(auto &status_code : status_code_strings()) emplace(status_code.second.substr(0, 3), status_code.first); } }; /* Reverse lookup table (3-digit prefix of each reason string -> enum value), built once on first call from status_code_strings(). */ static StringToStatusCode string_to_status_code; auto pos = string_to_status_code.find(number); if(pos == string_to_status_code.end()) return static_cast<StatusCode>(atoi(number.c_str())); return pos->second; } /* Returns the "NNN Reason-Phrase" string for the given enum value, or a reference to a static empty string when the value has no entry in status_code_strings(). */ inline const std::string &status_code(StatusCode status_code_enum) noexcept { auto pos = status_code_strings().find(status_code_enum); if(pos == status_code_strings().end()) { static std::string empty_string; return empty_string; } return pos->second; } } // namespace SimpleWeb #endif // SIMPLE_WEB_STATUS_CODE_HPP ================================================ FILE: C/thirdparty/Simple-Web-Server/tests/CMakeLists.txt 
================================================ if(NOT MSVC) add_compile_options(-fno-access-control) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") add_compile_options(-Wno-thread-safety) endif() if(BUILD_TESTING) add_executable(sws_io_test io_test.cpp) target_link_libraries(sws_io_test simple-web-server) add_test(NAME sws_io_test COMMAND sws_io_test) add_executable(sws_parse_test parse_test.cpp) target_link_libraries(sws_parse_test simple-web-server) add_test(NAME sws_parse_test COMMAND sws_parse_test) endif() endif() if(OPENSSL_FOUND AND BUILD_TESTING) add_executable(sws_crypto_test crypto_test.cpp) target_link_libraries(sws_crypto_test simple-web-server) add_test(NAME sws_crypto_test COMMAND sws_crypto_test) endif() if(BUILD_TESTING) add_executable(status_code_test status_code_test.cpp) target_link_libraries(status_code_test simple-web-server) add_test(NAME status_code_test COMMAND status_code_test) endif() if(BUILD_FUZZING) add_executable(percent_decode fuzzers/percent_decode.cpp) target_compile_options(percent_decode PRIVATE -fsanitize=address,fuzzer) target_link_options(percent_decode PRIVATE -fsanitize=address,fuzzer) target_link_libraries(percent_decode simple-web-server) add_executable(query_string_parse fuzzers/query_string_parse.cpp) target_compile_options(query_string_parse PRIVATE -fsanitize=address,fuzzer) target_link_options(query_string_parse PRIVATE -fsanitize=address,fuzzer) target_link_libraries(query_string_parse simple-web-server) add_executable(http_header_parse fuzzers/http_header_parse.cpp) target_compile_options(http_header_parse PRIVATE -fsanitize=address,fuzzer) target_link_options(http_header_parse PRIVATE -fsanitize=address,fuzzer) target_link_libraries(http_header_parse simple-web-server) add_executable(http_header_field_value_semicolon_separated_attributes_parse fuzzers/http_header_field_value_semicolon_separated_attributes_parse.cpp) target_compile_options(http_header_field_value_semicolon_separated_attributes_parse PRIVATE 
-fsanitize=address,fuzzer) target_link_options(http_header_field_value_semicolon_separated_attributes_parse PRIVATE -fsanitize=address,fuzzer) target_link_libraries(http_header_field_value_semicolon_separated_attributes_parse simple-web-server) add_executable(request_message_parse fuzzers/request_message_parse.cpp) target_compile_options(request_message_parse PRIVATE -fsanitize=address,fuzzer) target_link_options(request_message_parse PRIVATE -fsanitize=address,fuzzer) target_link_libraries(request_message_parse simple-web-server) add_executable(response_message_parse fuzzers/response_message_parse.cpp) target_compile_options(response_message_parse PRIVATE -fsanitize=address,fuzzer) target_link_options(response_message_parse PRIVATE -fsanitize=address,fuzzer) target_link_libraries(response_message_parse simple-web-server) endif() ================================================ FILE: C/thirdparty/Simple-Web-Server/tests/assert.hpp ================================================ #ifndef SIMPLE_WEB_ASSERT_HPP #define SIMPLE_WEB_ASSERT_HPP #include <cstdlib> #include <iostream> /* Test assertion macro: when e evaluates false, prints the failed expression together with the enclosing function, file and line to std::cerr and calls std::abort(); when e is true it expands to a no-op. Unlike <cassert>'s assert, it is not disabled by NDEBUG. */ #define ASSERT(e) ((void)((e) ? 
((void)0) : ((void)(std::cerr << "Assertion failed: (" << #e << "), function " << __func__ << ", file " << __FILE__ << ", line " << __LINE__ << ".\n"), std::abort()))) #endif /* SIMPLE_WEB_ASSERT_HPP */ ================================================ FILE: C/thirdparty/Simple-Web-Server/tests/crypto_test.cpp ================================================ #include "assert.hpp" #include "crypto.hpp" #include <vector> using namespace std; using namespace SimpleWeb; const vector<pair<string, string>> base64_string_tests = { {"", ""}, {"f", "Zg=="}, {"fo", "Zm8="}, {"foo", "Zm9v"}, {"foob", "Zm9vYg=="}, {"fooba", "Zm9vYmE="}, {"foobar", "Zm9vYmFy"}, {"The itsy bitsy spider climbed up the waterspout.\r\nDown came the rain\r\nand washed the spider out.\r\nOut came the sun\r\nand dried up all the rain\r\nand the itsy bitsy spider climbed up the spout again.", "VGhlIGl0c3kgYml0c3kgc3BpZGVyIGNsaW1iZWQgdXAgdGhlIHdhdGVyc3BvdXQuDQpEb3duIGNhbWUgdGhlIHJhaW4NCmFuZCB3YXNoZWQgdGhlIHNwaWRlciBvdXQuDQpPdXQgY2FtZSB0aGUgc3VuDQphbmQgZHJpZWQgdXAgYWxsIHRoZSByYWluDQphbmQgdGhlIGl0c3kgYml0c3kgc3BpZGVyIGNsaW1iZWQgdXAgdGhlIHNwb3V0IGFnYWluLg=="}}; const vector<pair<string, string>> md5_string_tests = { {"", "d41d8cd98f00b204e9800998ecf8427e"}, {"The quick brown fox jumps over the lazy dog", "9e107d9d372bb6826bd81d3542a419d6"}}; const vector<pair<string, string>> sha1_string_tests = { {"", "da39a3ee5e6b4b0d3255bfef95601890afd80709"}, {"The quick brown fox jumps over the lazy dog", "2fd4e1c67a2d28fced849ee1bb76e7391b93eb12"}}; const vector<pair<string, string>> sha256_string_tests = { {"", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, {"The quick brown fox jumps over the lazy dog", "d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592"}}; const vector<pair<string, string>> sha512_string_tests = { {"", "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"}, {"The quick brown fox jumps 
over the lazy dog", "07e547d9586f6a73f73fbac0435ed76951218fb7d0c8d788a309d785436bbb642e93a252a954f23912547d1e8a3b5ed6e1bfd7097821233fa0538f3db854fee6"}}; int main() { for(auto &string_test : base64_string_tests) { ASSERT(Crypto::Base64::encode(string_test.first) == string_test.second); ASSERT(Crypto::Base64::decode(string_test.second) == string_test.first); } for(auto &string_test : md5_string_tests) { ASSERT(Crypto::to_hex_string(Crypto::md5(string_test.first)) == string_test.second); stringstream ss(string_test.first); ASSERT(Crypto::to_hex_string(Crypto::md5(ss)) == string_test.second); } for(auto &string_test : sha1_string_tests) { ASSERT(Crypto::to_hex_string(Crypto::sha1(string_test.first)) == string_test.second); stringstream ss(string_test.first); ASSERT(Crypto::to_hex_string(Crypto::sha1(ss)) == string_test.second); } for(auto &string_test : sha256_string_tests) { ASSERT(Crypto::to_hex_string(Crypto::sha256(string_test.first)) == string_test.second); stringstream ss(string_test.first); ASSERT(Crypto::to_hex_string(Crypto::sha256(ss)) == string_test.second); } for(auto &string_test : sha512_string_tests) { ASSERT(Crypto::to_hex_string(Crypto::sha512(string_test.first)) == string_test.second); stringstream ss(string_test.first); ASSERT(Crypto::to_hex_string(Crypto::sha512(ss)) == string_test.second); } // Testing iterations ASSERT(Crypto::to_hex_string(Crypto::sha1("Test", 1)) == "640ab2bae07bedc4c163f679a746f7ab7fb5d1fa"); ASSERT(Crypto::to_hex_string(Crypto::sha1("Test", 2)) == "af31c6cbdecd88726d0a9b3798c71ef41f1624d5"); stringstream ss("Test"); ASSERT(Crypto::to_hex_string(Crypto::sha1(ss, 2)) == "af31c6cbdecd88726d0a9b3798c71ef41f1624d5"); ASSERT(Crypto::to_hex_string(Crypto::pbkdf2("Password", "Salt", 4096, 128 / 8)) == "f66df50f8aaa11e4d9721e1312ff2e66"); ASSERT(Crypto::to_hex_string(Crypto::pbkdf2("Password", "Salt", 8192, 512 / 8)) == 
"a941ccbc34d1ee8ebbd1d34824a419c3dc4eac9cbc7c36ae6c7ca8725e2b618a6ad22241e787af937b0960cf85aa8ea3a258f243e05d3cc9b08af5dd93be046c"); } ================================================ FILE: C/thirdparty/Simple-Web-Server/tests/io_test.cpp ================================================ #include "assert.hpp" #include "client_http.hpp" #include "server_http.hpp" #include <future> using namespace std; using HttpServer = SimpleWeb::Server<SimpleWeb::HTTP>; using HttpClient = SimpleWeb::Client<SimpleWeb::HTTP>; int main() { // Test ScopeRunner { SimpleWeb::ScopeRunner scope_runner; std::thread cancel_thread; { ASSERT(scope_runner.count == 0); auto lock = scope_runner.continue_lock(); ASSERT(lock); ASSERT(scope_runner.count == 1); { auto lock = scope_runner.continue_lock(); ASSERT(lock); ASSERT(scope_runner.count == 2); } ASSERT(scope_runner.count == 1); cancel_thread = thread([&scope_runner] { scope_runner.stop(); ASSERT(scope_runner.count == -1); }); this_thread::sleep_for(chrono::milliseconds(500)); ASSERT(scope_runner.count == 1); } cancel_thread.join(); ASSERT(scope_runner.count == -1); auto lock = scope_runner.continue_lock(); ASSERT(!lock); scope_runner.stop(); ASSERT(scope_runner.count == -1); scope_runner.count = 0; vector<thread> threads; for(size_t c = 0; c < 100; ++c) { threads.emplace_back([&scope_runner] { auto lock = scope_runner.continue_lock(); ASSERT(scope_runner.count > 0); }); } for(auto &thread : threads) thread.join(); ASSERT(scope_runner.count == 0); } HttpServer server; server.config.port = 8080; server.resource["^/string$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { auto content = request->content.string(); ASSERT(content == request->content.string()); *response << "HTTP/1.1 200 OK\r\nContent-Length: " << content.length() << "\r\n\r\n" << content; ASSERT(!request->remote_endpoint().address().to_string().empty()); ASSERT(request->remote_endpoint().port() != 0); }; 
server.resource["^/string/dup$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { auto content = request->content.string(); // Send content twice, before it has a chance to be written to the socket. *response << "HTTP/1.1 200 OK\r\nContent-Length: " << (content.length() * 2) << "\r\n\r\n" << content; response->send(); *response << content; response->send(); ASSERT(!request->remote_endpoint().address().to_string().empty()); ASSERT(request->remote_endpoint().port() != 0); }; server.resource["^/string2$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { response->write(request->content.string()); }; server.resource["^/string3$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { stringstream stream; stream << request->content.rdbuf(); response->write(stream); }; server.resource["^/string4$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { response->write(SimpleWeb::StatusCode::client_error_forbidden, {{"Test1", "test2"}, {"tesT3", "test4"}}); }; server.resource["^/info$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { stringstream content_stream; content_stream << request->method << " " << request->path << " " << request->http_version << " "; content_stream << request->header.find("test parameter")->second; content_stream.seekp(0, ios::end); *response << "HTTP/1.1 200 OK\r\nContent-Length: " << content_stream.tellp() << "\r\n\r\n" << content_stream.rdbuf(); }; server.resource["^/work$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { thread work_thread([response] { this_thread::sleep_for(chrono::seconds(5)); response->write("Work done"); }); work_thread.detach(); }; server.resource["^/match/([0-9]+)$"]["GET"] = [](shared_ptr<HttpServer::Response> response, 
shared_ptr<HttpServer::Request> request) { string number = request->path_match[1]; *response << "HTTP/1.1 200 OK\r\nContent-Length: " << number.length() << "\r\n\r\n" << number; }; server.resource["^/header$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { auto content = request->header.find("test1")->second + request->header.find("test2")->second; *response << "HTTP/1.1 200 OK\r\nContent-Length: " << content.length() << "\r\n\r\n" << content; }; server.resource["^/query_string$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ASSERT(request->path == "/query_string"); ASSERT(request->query_string == "testing"); auto queries = request->parse_query_string(); auto it = queries.find("Testing"); ASSERT(it != queries.end() && it->first == "testing" && it->second == ""); response->write(request->query_string); }; server.resource["^/chunked$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ASSERT(request->path == "/chunked"); ASSERT(request->content.string() == "SimpleWeb in\r\n\r\nchunks."); response->write("6\r\nSimple\r\n3\r\nWeb\r\nE\r\n in\r\n\r\nchunks.\r\n0\r\n\r\n", {{"Transfer-Encoding", "chunked"}}); }; server.resource["^/chunked2$"]["POST"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> request) { ASSERT(request->path == "/chunked2"); ASSERT(request->content.string() == 
"HelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorld"); response->write("258\r\nHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorld\r\n0\r\n\r\n", {{"Transfer-Encoding", "chunked"}}); }; server.resource["^/event-stream1$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { thread work_thread([response] { response->close_connection_after_response = true; // Unspecified content length // Send header promise<bool> header_error; response->write({{"Content-Type", "text/event-stream"}}); response->send([&header_error](const SimpleWeb::error_code &ec) { header_error.set_value(static_cast<bool>(ec)); }); ASSERT(!header_error.get_future().get()); *response << "data: 1\n\n"; promise<bool> error; response->send([&error](const SimpleWeb::error_code &ec) { error.set_value(static_cast<bool>(ec)); }); ASSERT(!error.get_future().get()); // Write result 
*response << "data: 2\n\n"; }); work_thread.detach(); }; server.resource["^/event-stream2$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { thread work_thread([response] { response->close_connection_after_response = true; // Unspecified content length // Send header promise<bool> header_error; response->write({{"Content-Type", "text/event-stream"}}); response->send([&header_error](const SimpleWeb::error_code &ec) { header_error.set_value(static_cast<bool>(ec)); }); ASSERT(!header_error.get_future().get()); *response << "data: 1\r\n\r\n"; promise<bool> error; response->send([&error](const SimpleWeb::error_code &ec) { error.set_value(static_cast<bool>(ec)); }); ASSERT(!error.get_future().get()); // Write result *response << "data: 2\r\n\r\n"; }); work_thread.detach(); }; server.resource["^/session-close$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { response->close_connection_after_response = true; // Unspecified content length response->write("test", {{"Session", "close"}}); }; server.resource["^/session-close-without-correct-header$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { response->close_connection_after_response = true; // Unspecified content length response->write("test"); }; server.resource["^/non-standard-line-endings1$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { *response << "HTTP/1.1 200 OK\r\nname: value\n\n"; }; server.resource["^/non-standard-line-endings2$"]["GET"] = [](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { *response << "HTTP/1.1 200 OK\nname: value\n\n"; }; std::string long_response; for(int c = 0; c < 1000; ++c) long_response += to_string(c); server.resource["^/long-response$"]["GET"] = [&long_response](shared_ptr<HttpServer::Response> response, 
shared_ptr<HttpServer::Request> /*request*/) { response->write(long_response, {{"name", "value"}}); }; thread server_thread([&server]() { // Start server server.start(); }); this_thread::sleep_for(chrono::seconds(1)); server.stop(); server_thread.join(); server_thread = thread([&server]() { // Start server server.start(); }); this_thread::sleep_for(chrono::seconds(1)); // Test various request types { HttpClient client("localhost:8080"); { stringstream output; auto r = client.request("POST", "/string", "A string"); ASSERT(SimpleWeb::status_code(r->status_code) == SimpleWeb::StatusCode::success_ok); output << r->content.rdbuf(); ASSERT(output.str() == "A string"); } { auto r = client.request("POST", "/string", "A string"); ASSERT(SimpleWeb::status_code(r->status_code) == SimpleWeb::StatusCode::success_ok); ASSERT(r->content.string() == "A string"); ASSERT(r->content.string() == "A string"); } { stringstream output; auto r = client.request("POST", "/string2", "A string"); ASSERT(SimpleWeb::status_code(r->status_code) == SimpleWeb::StatusCode::success_ok); output << r->content.rdbuf(); ASSERT(output.str() == "A string"); } { stringstream output; auto r = client.request("POST", "/string3", "A string"); ASSERT(SimpleWeb::status_code(r->status_code) == SimpleWeb::StatusCode::success_ok); output << r->content.rdbuf(); ASSERT(output.str() == "A string"); } { stringstream output; auto r = client.request("POST", "/string4", "A string"); ASSERT(SimpleWeb::status_code(r->status_code) == SimpleWeb::StatusCode::client_error_forbidden); ASSERT(r->header.size() == 3); ASSERT(r->header.find("test1")->second == "test2"); ASSERT(r->header.find("tEst3")->second == "test4"); ASSERT(r->header.find("content-length")->second == "0"); output << r->content.rdbuf(); ASSERT(output.str() == ""); } { stringstream output; stringstream content("A string"); auto r = client.request("POST", "/string", content); output << r->content.rdbuf(); ASSERT(output.str() == "A string"); } { // Test rapid calls 
to Response::send stringstream output; stringstream content("A string\n"); auto r = client.request("POST", "/string/dup", content); output << r->content.rdbuf(); ASSERT(output.str() == "A string\nA string\n"); } { stringstream output; auto r = client.request("GET", "/info", "", {{"Test Parameter", "test value"}}); output << r->content.rdbuf(); ASSERT(output.str() == "GET /info 1.1 test value"); } { stringstream output; auto r = client.request("GET", "/match/123"); output << r->content.rdbuf(); ASSERT(output.str() == "123"); } { auto r = client.request("POST", "/chunked", "6\r\nSimple\r\n3\r\nWeb\r\nE\r\n in\r\n\r\nchunks.\r\n0\r\n\r\n", {{"Transfer-Encoding", "chunked"}}); ASSERT(r->content.string() == "SimpleWeb in\r\n\r\nchunks."); } { auto r = client.request("POST", "/chunked2", "258\r\nHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorld\r\n0\r\n\r\n", {{"Transfer-Encoding", "chunked"}}); ASSERT(r->content.string() == 
"HelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorldHelloWorld"); } // Test reconnecting for(int c = 0; c < 20; ++c) { auto r = client.request("GET", "/session-close"); ASSERT(r->content.string() == "test"); } for(int c = 0; c < 20; ++c) { auto r = client.request("GET", "/session-close-without-correct-header"); ASSERT(r->content.string() == "test"); } // Test non-standard line endings { auto r = client.request("GET", "/non-standard-line-endings1"); ASSERT(r->http_version == "1.1"); ASSERT(r->status_code == "200 OK"); ASSERT(r->header.size() == 1); ASSERT(r->header.begin()->first == "name"); ASSERT(r->header.begin()->second == "value"); ASSERT(r->content.string().empty()); } { auto r = client.request("GET", "/non-standard-line-endings2"); ASSERT(r->http_version == "1.1"); ASSERT(r->status_code == "200 OK"); ASSERT(r->header.size() == 1); ASSERT(r->header.begin()->first == "name"); ASSERT(r->header.begin()->second == "value"); ASSERT(r->content.string().empty()); } } { HttpClient client("localhost:8080"); HttpClient::Connection *connection; { // test performing the stream version of the request methods first stringstream output; stringstream content("A string"); auto r = client.request("POST", "/string", content); output << r->content.rdbuf(); ASSERT(output.str() == "A string"); ASSERT(client.connections.size() == 1); connection = client.connections.begin()->get(); } { stringstream output; auto r = client.request("POST", "/string", "A 
string"); output << r->content.rdbuf(); ASSERT(output.str() == "A string"); ASSERT(client.connections.size() == 1); ASSERT(connection == client.connections.begin()->get()); } { stringstream output; auto r = client.request("GET", "/header", "", {{"test1", "test"}, {"test2", "ing"}}); output << r->content.rdbuf(); ASSERT(output.str() == "testing"); ASSERT(client.connections.size() == 1); ASSERT(connection == client.connections.begin()->get()); } { stringstream output; auto r = client.request("GET", "/query_string?testing"); ASSERT(r->content.string() == "testing"); ASSERT(client.connections.size() == 1); ASSERT(connection == client.connections.begin()->get()); } } // Test large responses { { HttpClient client("localhost:8080"); client.config.max_response_streambuf_size = 400; bool thrown = false; try { auto r = client.request("GET", "/long-response"); } catch(...) { thrown = true; } ASSERT(thrown); } HttpClient client("localhost:8080"); client.config.max_response_streambuf_size = 400; { size_t calls = 0; bool end = false; std::string content; client.request("GET", "/long-response", [&calls, &content, &end](shared_ptr<HttpClient::Response> response, const SimpleWeb::error_code &ec) { ASSERT(!ec); content += response->content.string(); calls++; if(calls == 1) ASSERT(response->content.end == false); end = response->content.end; }); client.io_service->run(); ASSERT(content == long_response); ASSERT(calls > 2); ASSERT(end == true); } { size_t calls = 0; std::string content; client.request("GET", "/long-response", [&calls, &content](shared_ptr<HttpClient::Response> response, const SimpleWeb::error_code &ec) { if(calls == 0) ASSERT(!ec); content += response->content.string(); calls++; response->close(); }); SimpleWeb::restart(*client.io_service); client.io_service->run(); ASSERT(!content.empty()); ASSERT(calls >= 2); } } // Test client timeout { HttpClient client("localhost:8080"); client.config.timeout = 2; bool thrown = false; try { auto r = client.request("GET", 
"/work"); } catch(...) { thrown = true; } ASSERT(thrown); } { HttpClient client("localhost:8080"); client.config.timeout = 2; bool call = false; client.request("GET", "/work", [&call](shared_ptr<HttpClient::Response> /*response*/, const SimpleWeb::error_code &ec) { ASSERT(ec); call = true; }); SimpleWeb::restart(*client.io_service); client.io_service->run(); ASSERT(call); } // Test asynchronous requests { HttpClient client("localhost:8080"); bool call = false; client.request("GET", "/match/123", [&call](shared_ptr<HttpClient::Response> response, const SimpleWeb::error_code &ec) { ASSERT(!ec); stringstream output; output << response->content.rdbuf(); ASSERT(output.str() == "123"); call = true; }); client.io_service->run(); ASSERT(call); // Test event-stream { vector<int> calls(4, 0); std::size_t call_num = 0; client.request("GET", "/event-stream1", [&calls, &call_num](shared_ptr<HttpClient::Response> response, const SimpleWeb::error_code &ec) { calls.at(call_num) = 1; if(call_num == 0) { ASSERT(response->content.string().empty()); ASSERT(!ec); } else if(call_num == 1) { ASSERT(response->content.string() == "data: 1\n"); ASSERT(!ec); } else if(call_num == 2) { ASSERT(response->content.string() == "data: 2\n"); ASSERT(!ec); } else if(call_num == 3) { ASSERT(response->content.string().empty()); ASSERT(ec == SimpleWeb::error::eof); } ++call_num; }); SimpleWeb::restart(*client.io_service); client.io_service->run(); for(auto call : calls) ASSERT(call); } { vector<int> calls(4, 0); std::size_t call_num = 0; client.request("GET", "/event-stream2", [&calls, &call_num](shared_ptr<HttpClient::Response> response, const SimpleWeb::error_code &ec) { calls.at(call_num) = 1; if(call_num == 0) { ASSERT(response->content.string().empty()); ASSERT(!ec); } else if(call_num == 1) { ASSERT(response->content.string() == "data: 1\n"); ASSERT(!ec); } else if(call_num == 2) { ASSERT(response->content.string() == "data: 2\n"); ASSERT(!ec); } else if(call_num == 3) { 
ASSERT(response->content.string().empty()); ASSERT(ec == SimpleWeb::error::eof); } ++call_num; }); SimpleWeb::restart(*client.io_service); client.io_service->run(); for(auto call : calls) ASSERT(call); } // Test concurrent requests from same client { vector<int> calls(100, 0); vector<thread> threads; for(size_t c = 0; c < 100; ++c) { threads.emplace_back([c, &client, &calls] { client.request("GET", "/match/123", [c, &calls](shared_ptr<HttpClient::Response> response, const SimpleWeb::error_code &ec) { ASSERT(!ec); stringstream output; output << response->content.rdbuf(); ASSERT(output.str() == "123"); calls[c] = 1; }); }); } for(auto &thread : threads) thread.join(); ASSERT(client.connections.size() == 100); SimpleWeb::restart(*client.io_service); client.io_service->run(); ASSERT(client.connections.size() == 1); for(auto call : calls) ASSERT(call); } // Test concurrent synchronous request calls from same client { HttpClient client("localhost:8080"); { vector<int> calls(5, 0); vector<thread> threads; for(size_t c = 0; c < 5; ++c) { threads.emplace_back([c, &client, &calls] { try { auto r = client.request("GET", "/match/123"); ASSERT(SimpleWeb::status_code(r->status_code) == SimpleWeb::StatusCode::success_ok); ASSERT(r->content.string() == "123"); calls[c] = 1; } catch(...) 
{ ASSERT(false); } }); } for(auto &thread : threads) thread.join(); ASSERT(client.connections.size() == 1); for(auto call : calls) ASSERT(call); } } // Test concurrent requests from different clients { vector<int> calls(10, 0); vector<thread> threads; for(size_t c = 0; c < 10; ++c) { threads.emplace_back([c, &calls] { HttpClient client("localhost:8080"); client.request("POST", "/string", "A string", [c, &calls](shared_ptr<HttpClient::Response> response, const SimpleWeb::error_code &ec) { ASSERT(!ec); ASSERT(response->content.string() == "A string"); calls[c] = 1; }); client.io_service->run(); }); } for(auto &thread : threads) thread.join(); for(auto call : calls) ASSERT(call); } } // Test multiple requests through a persistent connection { HttpClient client("localhost:8080"); ASSERT(client.connections.size() == 0); for(size_t c = 0; c < 5000; ++c) { auto r1 = client.request("POST", "/string", "A string"); ASSERT(SimpleWeb::status_code(r1->status_code) == SimpleWeb::StatusCode::success_ok); ASSERT(r1->content.string() == "A string"); ASSERT(client.connections.size() == 1); stringstream content("A string"); auto r2 = client.request("POST", "/string", content); ASSERT(SimpleWeb::status_code(r2->status_code) == SimpleWeb::StatusCode::success_ok); ASSERT(r2->content.string() == "A string"); ASSERT(client.connections.size() == 1); } } // Test multiple requests through new several client objects for(size_t c = 0; c < 100; ++c) { { HttpClient client("localhost:8080"); auto r = client.request("POST", "/string", "A string"); ASSERT(SimpleWeb::status_code(r->status_code) == SimpleWeb::StatusCode::success_ok); ASSERT(r->content.string() == "A string"); ASSERT(client.connections.size() == 1); } { HttpClient client("localhost:8080"); stringstream content("A string"); auto r = client.request("POST", "/string", content); ASSERT(SimpleWeb::status_code(r->status_code) == SimpleWeb::StatusCode::success_ok); ASSERT(r->content.string() == "A string"); ASSERT(client.connections.size() 
== 1); } } // Test Client client's stop() for(size_t c = 0; c < 40; ++c) { auto io_service = make_shared<SimpleWeb::io_context>(); bool call = false; HttpClient client("localhost:8080"); client.io_service = io_service; client.request("GET", "/work", [&call](shared_ptr<HttpClient::Response> /*response*/, const SimpleWeb::error_code &ec) { call = true; ASSERT(ec); }); thread thread([io_service] { io_service->run(); }); this_thread::sleep_for(chrono::milliseconds(100)); client.stop(); this_thread::sleep_for(chrono::milliseconds(100)); thread.join(); ASSERT(call); } // Test Client destructor that should cancel the client's request for(size_t c = 0; c < 40; ++c) { auto io_service = make_shared<SimpleWeb::io_context>(); { HttpClient client("localhost:8080"); client.io_service = io_service; client.request("GET", "/work", [](shared_ptr<HttpClient::Response> /*response*/, const SimpleWeb::error_code & /*ec*/) { ASSERT(false); }); thread thread([io_service] { io_service->run(); }); thread.detach(); this_thread::sleep_for(chrono::milliseconds(100)); } this_thread::sleep_for(chrono::milliseconds(100)); } server.stop(); server_thread.join(); // Test server destructor { auto io_service = make_shared<SimpleWeb::io_context>(); bool call = false; bool client_catch = false; { HttpServer server; server.config.port = 8081; server.io_service = io_service; server.resource["^/test$"]["GET"] = [&call](shared_ptr<HttpServer::Response> response, shared_ptr<HttpServer::Request> /*request*/) { call = true; thread sleep_thread([response] { this_thread::sleep_for(chrono::seconds(5)); response->write(SimpleWeb::StatusCode::success_ok, "test"); response->send([](const SimpleWeb::error_code & /*ec*/) { ASSERT(false); }); }); sleep_thread.detach(); }; server.start(); thread server_thread([io_service] { io_service->run(); }); server_thread.detach(); this_thread::sleep_for(chrono::seconds(1)); thread client_thread([&client_catch] { HttpClient client("localhost:8081"); try { auto r = 
client.request("GET", "/test"); ASSERT(false); } catch(...) { client_catch = true; } }); client_thread.detach(); this_thread::sleep_for(chrono::seconds(1)); } this_thread::sleep_for(chrono::seconds(5)); ASSERT(call); ASSERT(client_catch); io_service->stop(); } } ================================================ FILE: C/thirdparty/Simple-Web-Server/tests/parse_test.cpp ================================================ #include "assert.hpp" #include "client_http.hpp" #include "server_http.hpp" #include <iostream> using namespace std; using namespace SimpleWeb; class ServerTest : public ServerBase<HTTP> { public: ServerTest() : ServerBase<HTTP>::ServerBase(8080) {} void accept() noexcept override {} void parse_request_test() { auto session = std::make_shared<Session>(static_cast<size_t>(-1), create_connection(*io_service)); std::ostream stream(&session->request->content.streambuf); stream << "GET /test/ HTTP/1.1\r\n"; stream << "TestHeader: test\r\n"; stream << "TestHeader2:test2\r\n"; stream << "TestHeader3:test3a\r\n"; stream << "TestHeader3:test3b\r\n"; stream << "\r\n"; ASSERT(RequestMessage::parse(session->request->content, session->request->method, session->request->path, session->request->query_string, session->request->http_version, session->request->header)); ASSERT(session->request->method == "GET"); ASSERT(session->request->path == "/test/"); ASSERT(session->request->http_version == "1.1"); ASSERT(session->request->header.size() == 4); auto header_it = session->request->header.find("TestHeader"); ASSERT(header_it != session->request->header.end() && header_it->second == "test"); header_it = session->request->header.find("TestHeader2"); ASSERT(header_it != session->request->header.end() && header_it->second == "test2"); header_it = session->request->header.find("testheader"); ASSERT(header_it != session->request->header.end() && header_it->second == "test"); header_it = session->request->header.find("testheader2"); ASSERT(header_it != 
session->request->header.end() && header_it->second == "test2"); auto range = session->request->header.equal_range("testheader3"); auto first = range.first; auto second = first; ++second; ASSERT(range.first != session->request->header.end() && range.second != session->request->header.end() && ((first->second == "test3a" && second->second == "test3b") || (first->second == "test3b" && second->second == "test3a"))); } }; class ClientTest : public ClientBase<HTTP> { public: ClientTest(const std::string &server_port_path) : ClientBase<HTTP>::ClientBase(server_port_path, 80) {} std::shared_ptr<Connection> create_connection() noexcept override { return nullptr; } void connect(const std::shared_ptr<Session> &) noexcept override {} void constructor_parse_test1() { ASSERT(host == "test.org"); ASSERT(port == 8080); } void constructor_parse_test2() { ASSERT(host == "test.org"); ASSERT(port == 80); } void parse_response_header_test() { std::shared_ptr<Response> response(new Response(static_cast<size_t>(-1), nullptr)); ostream stream(&response->streambuf); stream << "HTTP/1.1 200 OK\r\n"; stream << "TestHeader: test\r\n"; stream << "TestHeader2: test2\r\n"; stream << "TestHeader3:test3a\r\n"; stream << "TestHeader3:test3b\r\n"; stream << "TestHeader4:\r\n"; stream << "TestHeader5: \r\n"; stream << "TestHeader6: \r\n"; stream << "\r\n"; ASSERT(ResponseMessage::parse(response->content, response->http_version, response->status_code, response->header)); ASSERT(response->http_version == "1.1"); ASSERT(response->status_code == "200 OK"); ASSERT(response->header.size() == 7); auto header_it = response->header.find("TestHeader"); ASSERT(header_it != response->header.end() && header_it->second == "test"); header_it = response->header.find("TestHeader2"); ASSERT(header_it != response->header.end() && header_it->second == "test2"); header_it = response->header.find("testheader"); ASSERT(header_it != response->header.end() && header_it->second == "test"); header_it = 
response->header.find("testheader2"); ASSERT(header_it != response->header.end() && header_it->second == "test2"); auto range = response->header.equal_range("testheader3"); auto first = range.first; auto second = first; ++second; ASSERT(range.first != response->header.end() && range.second != response->header.end() && ((first->second == "test3a" && second->second == "test3b") || (first->second == "test3b" && second->second == "test3a"))); header_it = response->header.find("TestHeader4"); ASSERT(header_it != response->header.end() && header_it->second == ""); header_it = response->header.find("TestHeader5"); ASSERT(header_it != response->header.end() && header_it->second == ""); header_it = response->header.find("TestHeader6"); ASSERT(header_it != response->header.end() && header_it->second == ""); } }; int main() { ASSERT(case_insensitive_equal("Test", "tesT")); ASSERT(case_insensitive_equal("tesT", "test")); ASSERT(!case_insensitive_equal("test", "tseT")); CaseInsensitiveEqual equal; ASSERT(equal("Test", "tesT")); ASSERT(equal("tesT", "test")); ASSERT(!equal("test", "tset")); CaseInsensitiveHash hash; ASSERT(hash("Test") == hash("tesT")); ASSERT(hash("tesT") == hash("test")); ASSERT(hash("test") != hash("tset")); auto percent_decoded = "testing æøå !#$&'()*+,/:;=?@[]123-._~\r\n"; auto percent_encoded = "testing%20%C3%A6%C3%B8%C3%A5%20%21%23%24%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D123-._~%0D%0A"; ASSERT(Percent::encode(percent_decoded) == percent_encoded); ASSERT(Percent::decode(percent_encoded) == percent_decoded); ASSERT(Percent::decode(Percent::encode(percent_decoded)) == percent_decoded); SimpleWeb::CaseInsensitiveMultimap fields = {{"test1", "æøå"}, {"test2", "!#$&'()*+,/:;=?@[]"}}; auto query_string1 = "test1=%C3%A6%C3%B8%C3%A5&test2=%21%23%24%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D"; auto query_string2 = "test2=%21%23%24%26%27%28%29%2A%2B%2C%2F%3A%3B%3D%3F%40%5B%5D&test1=%C3%A6%C3%B8%C3%A5"; auto query_string_result = QueryString::create(fields); 
ASSERT(query_string_result == query_string1 || query_string_result == query_string2); auto fields_result1 = QueryString::parse(query_string1); auto fields_result2 = QueryString::parse(query_string2); ASSERT(fields_result1 == fields_result2 && fields_result1 == fields); auto serverTest = make_shared<ServerTest>(); serverTest->io_service = std::make_shared<io_context>(); serverTest->parse_request_test(); auto clientTest = make_shared<ClientTest>("test.org:8080"); clientTest->constructor_parse_test1(); auto clientTest2 = make_shared<ClientTest>("test.org"); clientTest2->constructor_parse_test2(); clientTest2->parse_response_header_test(); io_context io_service; asio::ip::tcp::socket socket(io_service); SimpleWeb::Server<HTTP>::Request request(static_cast<size_t>(-1), nullptr); { request.query_string = ""; auto queries = request.parse_query_string(); ASSERT(queries.empty()); } { request.query_string = "="; auto queries = request.parse_query_string(); ASSERT(queries.empty()); } { request.query_string = "=test"; auto queries = request.parse_query_string(); ASSERT(queries.empty()); } { request.query_string = "a=1%202%20%203&b=3+4&c&d=æ%25ø%26å%3F"; auto queries = request.parse_query_string(); { auto range = queries.equal_range("a"); ASSERT(range.first != range.second); ASSERT(range.first->second == "1 2 3"); } { auto range = queries.equal_range("b"); ASSERT(range.first != range.second); ASSERT(range.first->second == "3 4"); } { auto range = queries.equal_range("c"); ASSERT(range.first != range.second); ASSERT(range.first->second == ""); } { auto range = queries.equal_range("d"); ASSERT(range.first != range.second); ASSERT(range.first->second == "æ%ø&å?"); } } { SimpleWeb::CaseInsensitiveMultimap solution; std::stringstream header; auto parsed = SimpleWeb::HttpHeader::parse(header); ASSERT(parsed == solution); } { SimpleWeb::CaseInsensitiveMultimap solution = {{"Content-Type", "application/json"}}; std::stringstream header("Content-Type: application/json"); auto parsed = 
SimpleWeb::HttpHeader::parse(header); ASSERT(parsed == solution); } { SimpleWeb::CaseInsensitiveMultimap solution = {{"Content-Type", "application/json"}}; std::stringstream header("Content-Type: application/json\r"); auto parsed = SimpleWeb::HttpHeader::parse(header); ASSERT(parsed == solution); } { SimpleWeb::CaseInsensitiveMultimap solution = {{"Content-Type", "application/json"}}; std::stringstream header("Content-Type: application/json\r\n"); auto parsed = SimpleWeb::HttpHeader::parse(header); ASSERT(parsed == solution); } { { SimpleWeb::CaseInsensitiveMultimap solution; auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse(""); ASSERT(parsed == solution); } { SimpleWeb::CaseInsensitiveMultimap solution = {{"a", ""}}; auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("a"); ASSERT(parsed == solution); } { SimpleWeb::CaseInsensitiveMultimap solution = {{"a", ""}, {"b", ""}}; { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("a; b"); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("a;b"); ASSERT(parsed == solution); } } { SimpleWeb::CaseInsensitiveMultimap solution = {{"a", ""}, {"b", "c"}}; { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("a; b=c"); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("a;b=c"); ASSERT(parsed == solution); } } { SimpleWeb::CaseInsensitiveMultimap solution = {{"form-data", ""}}; auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data"); ASSERT(parsed == solution); } { SimpleWeb::CaseInsensitiveMultimap solution = {{"form-data", ""}, {"test", ""}}; { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; test"); ASSERT(parsed == solution); } } { SimpleWeb::CaseInsensitiveMultimap 
solution = {{"form-data", ""}, {"name", "file"}}; { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; name=\"file\""); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; name=file"); ASSERT(parsed == solution); } } { SimpleWeb::CaseInsensitiveMultimap solution = {{"form-data", ""}, {"name", "file"}, {"filename", "filename.png"}}; { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; name=\"file\"; filename=\"filename.png\""); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data;name=\"file\";filename=\"filename.png\""); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; name=file; filename=filename.png"); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data;name=file;filename=filename.png"); ASSERT(parsed == solution); } } { SimpleWeb::CaseInsensitiveMultimap solution = {{"form-data", ""}, {"name", "fi le"}, {"filename", "file name.png"}}; { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; name=\"fi le\"; filename=\"file name.png\""); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; name=\"fi%20le\"; filename=\"file%20name.png\""); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; name=fi le; filename=file name.png"); ASSERT(parsed == solution); } { auto parsed = SimpleWeb::HttpHeader::FieldValue::SemicolonSeparatedAttributes::parse("form-data; name=fi%20le; filename=file%20name.png"); ASSERT(parsed == solution); } } } 
ASSERT(SimpleWeb::Date::to_string(std::chrono::system_clock::now()).size() == 29); } ================================================ FILE: C/thirdparty/Simple-Web-Server/tests/status_code_test.cpp ================================================ #include "assert.hpp" #include "status_code.hpp" using namespace SimpleWeb; int main() { ASSERT(status_code("") == StatusCode::unknown); ASSERT(status_code("Error") == StatusCode::unknown); ASSERT(status_code("000 Error") == StatusCode::unknown); ASSERT(status_code(StatusCode::unknown) == ""); ASSERT(static_cast<int>(status_code("050 Custom")) == 50); ASSERT(static_cast<int>(status_code("950 Custom")) == 950); ASSERT(status_code("100 Continue") == StatusCode::information_continue); ASSERT(status_code("100 C") == StatusCode::information_continue); ASSERT(status_code("100") == StatusCode::information_continue); ASSERT(status_code(StatusCode::information_continue) == "100 Continue"); ASSERT(status_code("200 OK") == StatusCode::success_ok); ASSERT(status_code(StatusCode::success_ok) == "200 OK"); ASSERT(status_code("208 Already Reported") == StatusCode::success_already_reported); ASSERT(status_code(StatusCode::success_already_reported) == "208 Already Reported"); ASSERT(status_code("308 Permanent Redirect") == StatusCode::redirection_permanent_redirect); ASSERT(status_code(StatusCode::redirection_permanent_redirect) == "308 Permanent Redirect"); ASSERT(status_code("404 Not Found") == StatusCode::client_error_not_found); ASSERT(status_code(StatusCode::client_error_not_found) == "404 Not Found"); ASSERT(status_code("502 Bad Gateway") == StatusCode::server_error_bad_gateway); ASSERT(status_code(StatusCode::server_error_bad_gateway) == "502 Bad Gateway"); ASSERT(status_code("504 Gateway Timeout") == StatusCode::server_error_gateway_timeout); ASSERT(status_code(StatusCode::server_error_gateway_timeout) == "504 Gateway Timeout"); ASSERT(status_code("511 Network Authentication Required") == 
StatusCode::server_error_network_authentication_required);
  ASSERT(status_code(StatusCode::server_error_network_authentication_required) == "511 Network Authentication Required");
}


================================================
FILE: C/thirdparty/Simple-Web-Server/utility.hpp
================================================
#ifndef SIMPLE_WEB_UTILITY_HPP
#define SIMPLE_WEB_UTILITY_HPP

#include "status_code.hpp"
#include <atomic>
#include <chrono>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>

// Portable deprecation attribute for the compilers this header supports
#ifndef DEPRECATED
#if defined(__GNUC__) || defined(__clang__)
#define DEPRECATED __attribute__((deprecated))
#elif defined(_MSC_VER)
#define DEPRECATED __declspec(deprecated)
#else
#define DEPRECATED
#endif
#endif

// Select a string_view implementation: std::string_view when compiling as
// C++17 or later, boost::string_ref when building against Boost.Asio,
// otherwise fall back to a const std::string reference.
#if __cplusplus > 201402L || _MSVC_LANG > 201402L
#include <string_view>
namespace SimpleWeb {
  using string_view = std::string_view;
} // namespace SimpleWeb
#elif !defined(USE_STANDALONE_ASIO)
#include <boost/utility/string_ref.hpp>
namespace SimpleWeb {
  using string_view = boost::string_ref;
} // namespace SimpleWeb
#else
namespace SimpleWeb {
  using string_view = const std::string &;
} // namespace SimpleWeb
#endif

namespace SimpleWeb {
  /// Returns true if the two strings are equal ignoring ASCII case.
  /// Sizes are compared first so unequal lengths never reach std::equal.
  inline bool case_insensitive_equal(const std::string &str1, const std::string &str2) noexcept {
    return str1.size() == str2.size() &&
           std::equal(str1.begin(), str1.end(), str2.begin(), [](char a, char b) {
             return tolower(a) == tolower(b);
           });
  }

  /// Function object wrapper around case_insensitive_equal, for use as the
  /// equality predicate of unordered containers.
  class CaseInsensitiveEqual {
  public:
    bool operator()(const std::string &str1, const std::string &str2) const noexcept {
      return case_insensitive_equal(str1, str2);
    }
  };

  // Case-insensitive hash so keys that compare equal ignoring case land in the same bucket.
  // Based on https://stackoverflow.com/questions/2590677/how-do-i-combine-hash-values-in-c0x/2595226#2595226
  class CaseInsensitiveHash {
  public:
    std::size_t operator()(const std::string &str) const noexcept {
      std::size_t h = 0;
      std::hash<int> hash;
      for(auto c : str)
        // hash-combine of the lowercased character (boost::hash_combine formula)
        h ^= hash(tolower(c)) + 0x9e3779b9 + (h << 6) + (h >> 2);
      return h;
    }
  };

  /// Multimap keyed by header/field names compared case-insensitively.
  using CaseInsensitiveMultimap = std::unordered_multimap<std::string,
std::string, CaseInsensitiveHash, CaseInsensitiveEqual>; /// Percent encoding and decoding class Percent { public: /// Returns percent-encoded string static std::string encode(const std::string &value) noexcept { static auto hex_chars = "0123456789ABCDEF"; std::string result; result.reserve(value.size()); // Minimum size of result for(auto &chr : value) { if(!((chr >= '0' && chr <= '9') || (chr >= 'A' && chr <= 'Z') || (chr >= 'a' && chr <= 'z') || chr == '-' || chr == '.' || chr == '_' || chr == '~')) result += std::string("%") + hex_chars[static_cast<unsigned char>(chr) >> 4] + hex_chars[static_cast<unsigned char>(chr) & 15]; else result += chr; } return result; } /// Returns percent-decoded string static std::string decode(const std::string &value) noexcept { std::string result; result.reserve(value.size() / 3 + (value.size() % 3)); // Minimum size of result for(std::size_t i = 0; i < value.size(); ++i) { auto &chr = value[i]; if(chr == '%' && i + 2 < value.size()) { auto hex = value.substr(i + 1, 2); auto decoded_chr = static_cast<char>(std::strtol(hex.c_str(), nullptr, 16)); result += decoded_chr; i += 2; } else if(chr == '+') result += ' '; else result += chr; } return result; } }; /// Query string creation and parsing class QueryString { public: /// Returns query string created from given field names and values static std::string create(const CaseInsensitiveMultimap &fields) noexcept { std::string result; bool first = true; for(auto &field : fields) { result += (!first ? "&" : "") + field.first + '=' + Percent::encode(field.second); first = false; } return result; } /// Returns query keys with percent-decoded values. 
    static CaseInsensitiveMultimap parse(const std::string &query_string) noexcept {
      CaseInsensitiveMultimap result;

      if(query_string.empty())
        return result;

      // name_pos: start of the current field name; name_end_pos/value_pos stay
      // npos until the first '=' of the current field has been seen.
      std::size_t name_pos = 0;
      auto name_end_pos = std::string::npos;
      auto value_pos = std::string::npos;
      for(std::size_t c = 0; c < query_string.size(); ++c) {
        if(query_string[c] == '&') {
          // End of a field: emit it (fields with an empty name are dropped)
          auto name = query_string.substr(name_pos, (name_end_pos == std::string::npos ? c : name_end_pos) - name_pos);
          if(!name.empty()) {
            // A field without '=' gets an empty value
            auto value = value_pos == std::string::npos ? std::string() : query_string.substr(value_pos, c - value_pos);
            result.emplace(std::move(name), Percent::decode(value));
          }
          name_pos = c + 1;
          name_end_pos = std::string::npos;
          value_pos = std::string::npos;
        }
        else if(query_string[c] == '=' && name_end_pos == std::string::npos) {
          // Only the first '=' separates name from value
          name_end_pos = c;
          value_pos = c + 1;
        }
      }

      // Emit the last field, which is not terminated by '&'
      if(name_pos < query_string.size()) {
        auto name = query_string.substr(name_pos, (name_end_pos == std::string::npos ? std::string::npos : name_end_pos - name_pos));
        if(!name.empty()) {
          auto value = value_pos >= query_string.size() ? std::string() : query_string.substr(value_pos);
          result.emplace(std::move(name), Percent::decode(value));
        }
      }

      return result;
    }
  };

  class HttpHeader {
  public:
    /// Parse header fields from stream
    static CaseInsensitiveMultimap parse(std::istream &stream) noexcept {
      CaseInsensitiveMultimap result;
      std::string line;
      std::size_t param_end;
      // Stops at the first line without ':', which includes the blank line
      // that terminates the header section.
      while(getline(stream, line) && (param_end = line.find(':')) != std::string::npos) {
        std::size_t value_start = param_end + 1;
        // Skip spaces after ':' (the value itself may still end up empty)
        while(value_start + 1 < line.size() && line[value_start] == ' ')
          ++value_start;
        if(value_start < line.size())
          // Strip a trailing '\r' left over from CRLF line endings
          result.emplace(line.substr(0, param_end), line.substr(value_start, line.size() - value_start - (line.back() == '\r' ? 1 : 0)));
      }
      return result;
    }

    class FieldValue {
    public:
      class SemicolonSeparatedAttributes {
      public:
        /// Parse Set-Cookie or Content-Disposition from given header field value.
        /// Attribute values are percent-decoded.
static CaseInsensitiveMultimap parse(const std::string &value) { CaseInsensitiveMultimap result; std::size_t name_start_pos = std::string::npos; std::size_t name_end_pos = std::string::npos; std::size_t value_start_pos = std::string::npos; for(std::size_t c = 0; c < value.size(); ++c) { if(name_start_pos == std::string::npos) { if(value[c] != ' ' && value[c] != ';') name_start_pos = c; } else { if(name_end_pos == std::string::npos) { if(value[c] == ';') { result.emplace(value.substr(name_start_pos, c - name_start_pos), std::string()); name_start_pos = std::string::npos; } else if(value[c] == '=') name_end_pos = c; } else { if(value_start_pos == std::string::npos) { if(value[c] == '"' && c + 1 < value.size()) value_start_pos = c + 1; else value_start_pos = c; } else if(value[c] == '"' || value[c] == ';') { result.emplace(value.substr(name_start_pos, name_end_pos - name_start_pos), Percent::decode(value.substr(value_start_pos, c - value_start_pos))); name_start_pos = std::string::npos; name_end_pos = std::string::npos; value_start_pos = std::string::npos; } } } } if(name_start_pos != std::string::npos) { if(name_end_pos == std::string::npos) result.emplace(value.substr(name_start_pos), std::string()); else if(value_start_pos != std::string::npos) { if(value.back() == '"') result.emplace(value.substr(name_start_pos, name_end_pos - name_start_pos), Percent::decode(value.substr(value_start_pos, value.size() - 1))); else result.emplace(value.substr(name_start_pos, name_end_pos - name_start_pos), Percent::decode(value.substr(value_start_pos))); } } return result; } }; }; }; class RequestMessage { public: /** Parse request line and header fields from a request stream. * * @param[in] stream Stream to parse. * @param[out] method HTTP method. * @param[out] path Path from request URI. * @param[out] query_string Query string from request URI. * @param[out] version HTTP version. * @param[out] header Header fields. * * @return True if stream is parsed successfully, false if not. 
     */
    static bool parse(std::istream &stream, std::string &method, std::string &path, std::string &query_string, std::string &version, CaseInsensitiveMultimap &header) noexcept {
      std::string line;
      std::size_t method_end;
      // Request line layout: "<method> <path>[?<query>] HTTP/<version>"
      if(getline(stream, line) && (method_end = line.find(' ')) != std::string::npos) {
        method = line.substr(0, method_end);

        // Locate the start of the query string (first '?') and the end of the
        // path-and-query part (the space before the protocol token).
        std::size_t query_start = std::string::npos;
        std::size_t path_and_query_string_end = std::string::npos;
        for(std::size_t i = method_end + 1; i < line.size(); ++i) {
          if(line[i] == '?' && (i + 1) < line.size() && query_start == std::string::npos)
            query_start = i + 1;
          else if(line[i] == ' ') {
            path_and_query_string_end = i;
            break;
          }
        }
        if(path_and_query_string_end != std::string::npos) {
          if(query_start != std::string::npos) {
            // The '?' itself belongs to neither the path nor the query string
            path = line.substr(method_end + 1, query_start - method_end - 2);
            query_string = line.substr(query_start, path_and_query_string_end - query_start);
          }
          else
            path = line.substr(method_end + 1, path_and_query_string_end - method_end - 1);

          // The protocol token must read "HTTP/<version>"; reject anything else
          std::size_t protocol_end;
          if((protocol_end = line.find('/', path_and_query_string_end + 1)) != std::string::npos) {
            if(line.compare(path_and_query_string_end + 1, protocol_end - path_and_query_string_end - 1, "HTTP") != 0)
              return false;
            // Drop the trailing '\r' of the CRLF-terminated request line
            version = line.substr(protocol_end + 1, line.size() - protocol_end - 2);
          }
          else
            return false;

          header = HttpHeader::parse(stream);
        }
        else
          return false;
      }
      else
        return false;
      return true;
    }
  };

  class ResponseMessage {
  public:
    /** Parse status line and header fields from a response stream.
     *
     * @param[in]  stream      Stream to parse.
     * @param[out] version     HTTP version.
     * @param[out] status_code HTTP status code.
     * @param[out] header      Header fields.
     *
     * @return True if stream is parsed successfully, false if not.
     */
    static bool parse(std::istream &stream, std::string &version, std::string &status_code, CaseInsensitiveMultimap &header) noexcept {
      std::string line;
      std::size_t version_end;
      // Status line layout: "HTTP/<version> <status code and phrase>"
      if(getline(stream, line) && (version_end = line.find(' ')) != std::string::npos) {
        // The version starts after the 5-character "HTTP/" prefix
        if(5 < line.size())
          version = line.substr(5, version_end - 5);
        else
          return false;
        // The status code is the remainder, minus the trailing '\r' from CRLF
        if((version_end + 1) < line.size())
          status_code = line.substr(version_end + 1, line.size() - (version_end + 1) - (line.back() == '\r' ? 1 : 0));
        else
          return false;

        header = HttpHeader::parse(stream);
      }
      else
        return false;
      return true;
    }
  };

  /// Date class working with formats specified in RFC 7231 Date/Time Formats
  class Date {
  public:
    /// Returns the given std::chrono::system_clock::time_point as a string with the following format: Wed, 31 Jul 2019 11:34:23 GMT.
    static std::string to_string(const std::chrono::system_clock::time_point time_point) noexcept {
      // Cache of the most recently formatted result, guarded by mutex below;
      // reused while the requested time point falls within the same second.
      static std::string result_cache;
      static std::chrono::system_clock::time_point last_time_point;

      static std::mutex mutex;
      std::lock_guard<std::mutex> lock(mutex);

      if(std::chrono::duration_cast<std::chrono::seconds>(time_point - last_time_point).count() == 0 && !result_cache.empty())
        return result_cache;

      last_time_point = time_point;

      std::string result;
      result.reserve(29); // Length of the fixed-width date string produced below

      auto time = std::chrono::system_clock::to_time_t(time_point);
      tm tm;
      // Thread-safe UTC conversion: gmtime_s on MSVC/MinGW, gmtime_r elsewhere
#if defined(_MSC_VER) || defined(__MINGW32__)
      if(gmtime_s(&tm, &time) != 0)
        return {};
      auto gmtime = &tm;
#else
      auto gmtime = gmtime_r(&time, &tm);
      if(!gmtime)
        return {};
#endif

      switch(gmtime->tm_wday) {
        case 0: result += "Sun, "; break;
        case 1: result += "Mon, "; break;
        case 2: result += "Tue, "; break;
        case 3: result += "Wed, "; break;
        case 4: result += "Thu, "; break;
        case 5: result += "Fri, "; break;
        case 6: result += "Sat, "; break;
      }

      // Digits are emitted by hand ('0' == 48) to avoid locale-dependent formatting
      result += gmtime->tm_mday < 10 ? '0' : static_cast<char>(gmtime->tm_mday / 10 + 48);
      result += static_cast<char>(gmtime->tm_mday % 10 + 48);

      switch(gmtime->tm_mon) {
        case 0: result += " Jan "; break;
        case 1: result += " Feb "; break;
        case 2: result += " Mar "; break;
        case 3: result += " Apr "; break;
        case 4: result += " May "; break;
        case 5: result += " Jun "; break;
        case 6: result += " Jul "; break;
        case 7: result += " Aug "; break;
        case 8: result += " Sep "; break;
        case 9: result += " Oct "; break;
        case 10: result += " Nov "; break;
        case 11: result += " Dec "; break;
      }

      auto year = gmtime->tm_year + 1900;
      result += static_cast<char>(year / 1000 + 48);
      result += static_cast<char>((year / 100) % 10 + 48);
      result += static_cast<char>((year / 10) % 10 + 48);
      result += static_cast<char>(year % 10 + 48);
      result += ' ';

      result += gmtime->tm_hour < 10 ? '0' : static_cast<char>(gmtime->tm_hour / 10 + 48);
      result += static_cast<char>(gmtime->tm_hour % 10 + 48);
      result += ':';

      result += gmtime->tm_min < 10 ? '0' : static_cast<char>(gmtime->tm_min / 10 + 48);
      result += static_cast<char>(gmtime->tm_min % 10 + 48);
      result += ':';

      result += gmtime->tm_sec < 10 ? '0' : static_cast<char>(gmtime->tm_sec / 10 + 48);
      result += static_cast<char>(gmtime->tm_sec % 10 + 48);

      result += " GMT";

      result_cache = result;
      return result;
    }
  };
} // namespace SimpleWeb

// spin_loop_pause(): CPU pause hint for the spin-wait loops in ScopeRunner below
#ifdef __SSE2__
#include <emmintrin.h>
namespace SimpleWeb {
  inline void spin_loop_pause() noexcept { _mm_pause(); }
} // namespace SimpleWeb
// TODO: need verification that the following checks are correct:
#elif defined(_MSC_VER) && _MSC_VER >= 1800 && (defined(_M_X64) || defined(_M_IX86))
#include <intrin.h>
namespace SimpleWeb {
  inline void spin_loop_pause() noexcept { _mm_pause(); }
} // namespace SimpleWeb
#else
namespace SimpleWeb {
  inline void spin_loop_pause() noexcept {}
} // namespace SimpleWeb
#endif

namespace SimpleWeb {
  /// Makes it possible to for instance cancel Asio handlers without stopping asio::io_service.
class ScopeRunner { /// Scope count that is set to -1 if scopes are to be canceled. std::atomic<long> count; public: class SharedLock { friend class ScopeRunner; std::atomic<long> &count; SharedLock(std::atomic<long> &count) noexcept : count(count) {} SharedLock &operator=(const SharedLock &) = delete; SharedLock(const SharedLock &) = delete; public: ~SharedLock() noexcept { count.fetch_sub(1); } }; ScopeRunner() noexcept : count(0) {} /// Returns nullptr if scope should be exited, or a shared lock otherwise. /// The shared lock ensures that a potential destructor call is delayed until all locks are released. std::unique_ptr<SharedLock> continue_lock() noexcept { long expected = count; while(expected >= 0 && !count.compare_exchange_weak(expected, expected + 1)) spin_loop_pause(); if(expected < 0) return nullptr; else return std::unique_ptr<SharedLock>(new SharedLock(count)); } /// Blocks until all shared locks are released, then prevents future shared locks. void stop() noexcept { long expected = 0; while(!count.compare_exchange_weak(expected, -1)) { if(expected < 0) return; expected = 0; spin_loop_pause(); } } }; } // namespace SimpleWeb #endif // SIMPLE_WEB_UTILITY_HPP ================================================ FILE: C/thirdparty/Simple-Web-Server/web/index.html ================================================ <html> <head> <title>Simple-Web-Server html-file This is the content of index.html ================================================ FILE: C/thirdparty/Simple-Web-Server/web/test.html ================================================ Simple-Web-Server html-file This is the content of test.html ================================================ FILE: C/thirdparty/rapidjson/.gitattributes ================================================ # Set the default behavior, in case people don't have core.autocrlf set. * text=auto # Explicitly declare text files you want to always be normalized and converted # to native line endings on checkout. 
*.cpp text *.h text *.txt text *.md text *.cmake text *.svg text *.dot text *.yml text *.in text *.sh text *.autopkg text Dockerfile text # Denote all files that are truly binary and should not be modified. *.png binary *.jpg binary *.json binary ================================================ FILE: C/thirdparty/rapidjson/.gitignore ================================================ /bin/* !/bin/data !/bin/encodings !/bin/jsonchecker !/bin/types !/bin/unittestschema /build /doc/html /doc/doxygen_*.db *.a # Temporary files created during CMake build CMakeCache.txt CMakeFiles cmake_install.cmake CTestTestfile.cmake Makefile RapidJSON*.cmake RapidJSON.pc Testing /googletest install_manifest.txt Doxyfile Doxyfile.zh-cn DartConfiguration.tcl *.nupkg # Files created by OS *.DS_Store ================================================ FILE: C/thirdparty/rapidjson/.gitmodules ================================================ [submodule "thirdparty/gtest"] path = thirdparty/gtest url = https://github.com/google/googletest.git ================================================ FILE: C/thirdparty/rapidjson/.travis.yml ================================================ sudo: required dist: xenial language: cpp cache: - ccache addons: apt: sources: - ubuntu-toolchain-r-test packages: - cmake - valgrind - clang-8 env: global: - USE_CCACHE=1 - CCACHE_SLOPPINESS=pch_defines,time_macros - CCACHE_COMPRESS=1 - CCACHE_MAXSIZE=100M - ARCH_FLAGS_x86='-m32' # #266: don't use SSE on 32-bit - ARCH_FLAGS_x86_64='-msse4.2' # use SSE4.2 on 64-bit - ARCH_FLAGS_aarch64='-march=armv8-a' - GITHUB_REPO='Tencent/rapidjson' - secure: "HrsaCb+N66EG1HR+LWH1u51SjaJyRwJEDzqJGYMB7LJ/bfqb9mWKF1fLvZGk46W5t7TVaXRDD5KHFx9DPWvKn4gRUVkwTHEy262ah5ORh8M6n/6VVVajeV/AYt2C0sswdkDBDO4Xq+xy5gdw3G8s1A4Inbm73pUh+6vx+7ltBbk=" matrix: include: # gcc - env: CONF=release ARCH=x86 CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=OFF compiler: gcc arch: amd64 - env: CONF=release ARCH=x86_64 CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=OFF 
compiler: gcc arch: amd64 - env: CONF=release ARCH=x86_64 CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=ON compiler: gcc arch: amd64 - env: CONF=debug ARCH=x86 CXX11=OFF CXX17=OFF CXX20=OFF MEMBERSMAP=OFF compiler: gcc arch: amd64 - env: CONF=debug ARCH=x86_64 CXX11=OFF CXX17=OFF CXX20=OFF MEMBERSMAP=OFF compiler: gcc arch: amd64 - env: CONF=debug ARCH=x86 CXX11=OFF CXX17=ON CXX20=OFF MEMBERSMAP=ON CXX_FLAGS='-D_GLIBCXX_DEBUG' compiler: gcc arch: amd64 - env: CONF=debug ARCH=x86_64 CXX11=OFF CXX17=ON CXX20=OFF MEMBERSMAP=ON CXX_FLAGS='-D_GLIBCXX_DEBUG' compiler: gcc arch: amd64 - env: CONF=release ARCH=aarch64 CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=OFF compiler: gcc arch: arm64 - env: CONF=release ARCH=aarch64 CXX11=OFF CXX17=OFF CXX20=OFF MEMBERSMAP=OFF compiler: gcc arch: arm64 - env: CONF=release ARCH=aarch64 CXX11=OFF CXX17=ON CXX20=OFF MEMBERSMAP=ON compiler: gcc arch: arm64 # clang - env: CONF=release ARCH=x86 CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=ON CCACHE_CPP2=yes compiler: clang arch: amd64 - env: CONF=release ARCH=x86_64 CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=ON CCACHE_CPP2=yes compiler: clang arch: amd64 - env: CONF=release ARCH=x86_64 CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=OFF CCACHE_CPP2=yes compiler: clang arch: amd64 - env: CONF=debug ARCH=x86 CXX11=OFF CXX17=OFF CXX20=OFF MEMBERSMAP=ON CCACHE_CPP2=yes compiler: clang arch: amd64 - env: CONF=debug ARCH=x86_64 CXX11=OFF CXX17=OFF CXX20=OFF MEMBERSMAP=ON CCACHE_CPP2=yes compiler: clang arch: amd64 - env: CONF=debug ARCH=x86 CXX11=OFF CXX17=ON CXX20=OFF MEMBERSMAP=OFF CCACHE_CPP2=yes compiler: clang arch: amd64 - env: CONF=debug ARCH=x86_64 CXX11=OFF CXX17=ON CXX20=OFF MEMBERSMAP=OFF CCACHE_CPP2=yes compiler: clang arch: amd64 - env: CONF=debug ARCH=aarch64 CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=ON CCACHE_CPP2=yes compiler: clang arch: arm64 - env: CONF=debug ARCH=aarch64 CXX11=OFF CXX17=OFF CXX20=OFF MEMBERSMAP=ON CCACHE_CPP2=yes compiler: clang arch: arm64 - env: CONF=debug ARCH=aarch64 CXX11=OFF 
CXX17=ON CXX20=OFF MEMBERSMAP=OFF CCACHE_CPP2=yes compiler: clang arch: arm64 # coverage report - env: CONF=debug ARCH=x86 GCOV_FLAGS='--coverage' CXX_FLAGS='-O0' CXX11=OFF CXX17=OFF CXX20=OFF compiler: gcc arch: amd64 cache: - ccache - pip after_success: - pip install --user cpp-coveralls - coveralls -r .. --gcov-options '\-lp' -e thirdparty -e example -e test -e build/CMakeFiles -e include/rapidjson/msinttypes -e include/rapidjson/internal/meta.h -e include/rapidjson/error/en.h - env: CONF=debug ARCH=x86_64 GCOV_FLAGS='--coverage' CXX_FLAGS='-O0' CXX11=ON CXX17=OFF CXX20=OFF MEMBERSMAP=ON compiler: gcc arch: amd64 cache: - ccache - pip after_success: - pip install --user cpp-coveralls - coveralls -r .. --gcov-options '\-lp' -e thirdparty -e example -e test -e build/CMakeFiles -e include/rapidjson/msinttypes -e include/rapidjson/internal/meta.h -e include/rapidjson/error/en.h - env: CONF=debug ARCH=aarch64 GCOV_FLAGS='--coverage' CXX_FLAGS='-O0' CXX11=OFF CXX17=ON CXX20=OFF compiler: gcc arch: arm64 cache: - ccache - pip after_success: - pip install --user cpp-coveralls - coveralls -r .. --gcov-options '\-lp' -e thirdparty -e example -e test -e build/CMakeFiles -e include/rapidjson/msinttypes -e include/rapidjson/internal/meta.h -e include/rapidjson/error/en.h - script: # Documentation task - cd build - cmake .. 
-DRAPIDJSON_HAS_STDSTRING=ON -DCMAKE_VERBOSE_MAKEFILE=ON - make travis_doc cache: false addons: apt: packages: - doxygen before_install: - if [ "x86_64" = "$(arch)" ]; then sudo apt-get install -y g++-multilib libc6-dbg:i386 --allow-unauthenticated; fi before_script: # travis provides clang-7 for amd64 and clang-3.8 for arm64 # here use clang-8 to all architectures as clang-7 is not available for arm64 - if [ -f /usr/bin/clang++-8 ]; then sudo update-alternatives --install /usr/bin/clang++ clang++ /usr/bin/clang++-8 1000; sudo update-alternatives --config clang++; export PATH=/usr/bin:$PATH; fi - if [ "$CXX" = "clang++" ]; then export CCACHE_CPP2=yes; fi - ccache -s # hack to avoid Valgrind bug (https://bugs.kde.org/show_bug.cgi?id=326469), # exposed by merging PR#163 (using -march=native) # TODO: Since this bug is already fixed. Remove this when valgrind can be upgraded. - sed -i "s/-march=native//" CMakeLists.txt - mkdir build script: - if [ "$CXX" = "clang++" ]; then export CXXFLAGS="-stdlib=libc++ ${CXXFLAGS}"; fi - > eval "ARCH_FLAGS=\${ARCH_FLAGS_${ARCH}}" ; (cd build && cmake -DRAPIDJSON_HAS_STDSTRING=ON -DRAPIDJSON_USE_MEMBERSMAP=$MEMBERSMAP -DRAPIDJSON_BUILD_CXX11=$CXX11 -DRAPIDJSON_BUILD_CXX17=$CXX17 -DRAPIDJSON_BUILD_CXX20=$CXX20 -DCMAKE_VERBOSE_MAKEFILE=ON -DCMAKE_BUILD_TYPE=$CONF -DCMAKE_CXX_FLAGS="$ARCH_FLAGS $GCOV_FLAGS $CXX_FLAGS" -DCMAKE_EXE_LINKER_FLAGS=$GCOV_FLAGS ..) - cd build - make tests -j 2 - make examples -j 2 - ctest -j 2 -V `[ "$CONF" = "release" ] || echo "-E perftest"` ================================================ FILE: C/thirdparty/rapidjson/CHANGELOG.md ================================================ # Change Log All notable changes to this project will be documented in this file. This project adheres to [Semantic Versioning](http://semver.org/). 
## [Unreleased] ## 1.1.0 - 2016-08-25 ### Added * Add GenericDocument ctor overload to specify JSON type (#369) * Add FAQ (#372, #373, #374, #376) * Add forward declaration header `fwd.h` * Add @PlatformIO Library Registry manifest file (#400) * Implement assignment operator for BigInteger (#404) * Add comments support (#443) * Adding coapp definition (#460) * documenttest.cpp: EXPECT_THROW when checking empty allocator (470) * GenericDocument: add implicit conversion to ParseResult (#480) * Use with C++ linkage on Windows ARM (#485) * Detect little endian for Microsoft ARM targets * Check Nan/Inf when writing a double (#510) * Add JSON Schema Implementation (#522) * Add iostream wrapper (#530) * Add Jsonx example for converting JSON into JSONx (a XML format) (#531) * Add optional unresolvedTokenIndex parameter to Pointer::Get() (#532) * Add encoding validation option for Writer/PrettyWriter (#534) * Add Writer::SetMaxDecimalPlaces() (#536) * Support {0, } and {0, m} in Regex (#539) * Add Value::Get/SetFloat(), Value::IsLossLessFloat/Double() (#540) * Add stream position check to reader unit tests (#541) * Add Templated accessors and range-based for (#542) * Add (Pretty)Writer::RawValue() (#543) * Add Document::Parse(std::string), Document::Parse(const char*, size_t length) and related APIs. 
(#553) * Add move constructor for GenericSchemaDocument (#554) * Add VS2010 and VS2015 to AppVeyor CI (#555) * Add parse-by-parts example (#556, #562) * Support parse number as string (#564, #589) * Add kFormatSingleLineArray for PrettyWriter (#577) * Added optional support for trailing commas (#584) * Added filterkey and filterkeydom examples (#615) * Added npm docs (#639) * Allow options for writing and parsing NaN/Infinity (#641) * Add std::string overload to PrettyWriter::Key() when RAPIDJSON_HAS_STDSTRING is defined (#698) ### Fixed * Fix gcc/clang/vc warnings (#350, #394, #397, #444, #447, #473, #515, #582, #589, #595, #667) * Fix documentation (#482, #511, #550, #557, #614, #635, #660) * Fix emscripten alignment issue (#535) * Fix missing allocator to uses of AddMember in document (#365) * CMake will no longer complain that the minimum CMake version is not specified (#501) * Make it usable with old VC8 (VS2005) (#383) * Prohibit C++11 move from Document to Value (#391) * Try to fix incorrect 64-bit alignment (#419) * Check return of fwrite to avoid warn_unused_result build failures (#421) * Fix UB in GenericDocument::ParseStream (#426) * Keep Document value unchanged on parse error (#439) * Add missing return statement (#450) * Fix Document::Parse(const Ch*) for transcoding (#478) * encodings.h: fix typo in preprocessor condition (#495) * Custom Microsoft headers are necessary only for Visual Studio 2012 and lower (#559) * Fix memory leak for invalid regex (26e69ffde95ba4773ab06db6457b78f308716f4b) * Fix a bug in schema minimum/maximum keywords for 64-bit integer (e7149d665941068ccf8c565e77495521331cf390) * Fix a crash bug in regex (#605) * Fix schema "required" keyword cannot handle duplicated keys (#609) * Fix cmake CMP0054 warning (#612) * Added missing include guards in istreamwrapper.h and ostreamwrapper.h (#634) * Fix undefined behaviour (#646) * Fix buffer overrun using PutN (#673) * Fix rapidjson::value::Get() may returns wrong data (#681) * Add 
Flush() for all value types (#689) * Handle malloc() fail in PoolAllocator (#691) * Fix builds on x32 platform. #703 ### Changed * Clarify problematic JSON license (#392) * Move Travis to container based infrastructure (#504, #558) * Make whitespace array more compact (#513) * Optimize Writer::WriteString() with SIMD (#544) * x86-64 48-bit pointer optimization for GenericValue (#546) * Define RAPIDJSON_HAS_CXX11_RVALUE_REFS directly in clang (#617) * Make GenericSchemaDocument constructor explicit (#674) * Optimize FindMember when use std::string (#690) ## [1.0.2] - 2015-05-14 ### Added * Add Value::XXXMember(...) overloads for std::string (#335) ### Fixed * Include rapidjson.h for all internal/error headers. * Parsing some numbers incorrectly in full-precision mode (`kFullPrecisionParseFlag`) (#342) * Fix some numbers parsed incorrectly (#336) * Fix alignment of 64bit platforms (#328) * Fix MemoryPoolAllocator::Clear() to clear user-buffer (0691502573f1afd3341073dd24b12c3db20fbde4) ### Changed * CMakeLists for include as a thirdparty in projects (#334, #337) * Change Document::ParseStream() to use stack allocator for Reader (ffbe38614732af8e0b3abdc8b50071f386a4a685) ## [1.0.1] - 2015-04-25 ### Added * Changelog following [Keep a CHANGELOG](https://github.com/olivierlacan/keep-a-changelog) suggestions. ### Fixed * Parsing of some numbers (e.g. "1e-00011111111111") causing assertion (#314). * Visual C++ 32-bit compilation error in `diyfp.h` (#317). ## [1.0.0] - 2015-04-22 ### Added * 100% [Coverall](https://coveralls.io/r/Tencent/rapidjson?branch=master) coverage. * Version macros (#311) ### Fixed * A bug in trimming long number sequence (4824f12efbf01af72b8cb6fc96fae7b097b73015). * Double quote in unicode escape (#288). * Negative zero roundtrip (double only) (#289). * Standardize behavior of `memcpy()` and `malloc()` (0c5c1538dcfc7f160e5a4aa208ddf092c787be5a, #305, 0e8bbe5e3ef375e7f052f556878be0bd79e9062d). 
### Removed * Remove an invalid `Document::ParseInsitu()` API (e7f1c6dd08b522cfcf9aed58a333bd9a0c0ccbeb). ## 1.0-beta - 2015-04-8 ### Added * RFC 7159 (#101) * Optional Iterative Parser (#76) * Deep-copy values (#20) * Error code and message (#27) * ASCII Encoding (#70) * `kParseStopWhenDoneFlag` (#83) * `kParseFullPrecisionFlag` (881c91d696f06b7f302af6d04ec14dd08db66ceb) * Add `Key()` to handler concept (#134) * C++11 compatibility and support (#128) * Optimized number-to-string and vice versa conversions (#137, #80) * Short-String Optimization (#131) * Local stream optimization by traits (#32) * Travis & Appveyor Continuous Integration, with Valgrind verification (#24, #242) * Redo all documentation (English, Simplified Chinese) ### Changed * Copyright ownership transferred to THL A29 Limited (a Tencent company). * Migrating from Premake to CMAKE (#192) * Resolve all warning reports ### Removed * Remove other JSON libraries for performance comparison (#180) ## 0.11 - 2012-11-16 ## 0.1 - 2011-11-18 [Unreleased]: https://github.com/Tencent/rapidjson/compare/v1.1.0...HEAD [1.1.0]: https://github.com/Tencent/rapidjson/compare/v1.0.2...v1.1.0 [1.0.2]: https://github.com/Tencent/rapidjson/compare/v1.0.1...v1.0.2 [1.0.1]: https://github.com/Tencent/rapidjson/compare/v1.0.0...v1.0.1 [1.0.0]: https://github.com/Tencent/rapidjson/compare/v1.0-beta...v1.0.0 ================================================ FILE: C/thirdparty/rapidjson/CMakeLists.txt ================================================ CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12) if(POLICY CMP0025) # detect Apple's Clang cmake_policy(SET CMP0025 NEW) endif() if(POLICY CMP0054) cmake_policy(SET CMP0054 NEW) endif() SET(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/CMakeModules) set(LIB_MAJOR_VERSION "1") set(LIB_MINOR_VERSION "1") set(LIB_PATCH_VERSION "0") set(LIB_VERSION_STRING "${LIB_MAJOR_VERSION}.${LIB_MINOR_VERSION}.${LIB_PATCH_VERSION}") if (CMAKE_VERSION VERSION_LESS 3.0) PROJECT(RapidJSON CXX) else() 
cmake_policy(SET CMP0048 NEW) PROJECT(RapidJSON VERSION "${LIB_VERSION_STRING}" LANGUAGES CXX) endif() # compile in release with debug info mode by default if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE "RelWithDebInfo" CACHE STRING "Choose the type of build, options are: Debug Release RelWithDebInfo MinSizeRel." FORCE) endif() # Build all binaries in a separate directory SET(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin) option(RAPIDJSON_BUILD_DOC "Build rapidjson documentation." ON) option(RAPIDJSON_BUILD_EXAMPLES "Build rapidjson examples." ON) option(RAPIDJSON_BUILD_TESTS "Build rapidjson perftests and unittests." ON) option(RAPIDJSON_BUILD_THIRDPARTY_GTEST "Use gtest installation in `thirdparty/gtest` by default if available" OFF) option(RAPIDJSON_BUILD_CXX11 "Build rapidjson with C++11" ON) option(RAPIDJSON_BUILD_CXX17 "Build rapidjson with C++17" OFF) option(RAPIDJSON_BUILD_CXX20 "Build rapidjson with C++20" OFF) if(RAPIDJSON_BUILD_CXX11) set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_STANDARD_REQUIRED TRUE) endif() option(RAPIDJSON_BUILD_ASAN "Build rapidjson with address sanitizer (gcc/clang)" OFF) option(RAPIDJSON_BUILD_UBSAN "Build rapidjson with undefined behavior sanitizer (gcc/clang)" OFF) option(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT "Build rapidjson with -march or -mcpu options" ON) option(RAPIDJSON_HAS_STDSTRING "" OFF) if(RAPIDJSON_HAS_STDSTRING) add_definitions(-DRAPIDJSON_HAS_STDSTRING) endif() option(RAPIDJSON_USE_MEMBERSMAP "" OFF) if(RAPIDJSON_USE_MEMBERSMAP) add_definitions(-DRAPIDJSON_USE_MEMBERSMAP=1) endif() find_program(CCACHE_FOUND ccache) if(CCACHE_FOUND) set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache) set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache) if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Qunused-arguments -fcolor-diagnostics") endif() endif(CCACHE_FOUND) find_program(VALGRIND_FOUND valgrind) if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU") if(RAPIDJSON_ENABLE_INSTRUMENTATION_OPT AND 
NOT CMAKE_CROSSCOMPILING) if(CMAKE_SYSTEM_PROCESSOR STREQUAL "powerpc" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64le") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=native") else() #FIXME: x86 is -march=native, but doesn't mean every arch is this option. To keep original project's compatibility, I leave this except POWER. set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native") endif() endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror") set(EXTRA_CXX_FLAGS -Weffc++ -Wswitch-default -Wfloat-equal -Wconversion -Wsign-conversion) if (RAPIDJSON_BUILD_CXX11 AND CMAKE_VERSION VERSION_LESS 3.1) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.7.0") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x") else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") endif() elseif (RAPIDJSON_BUILD_CXX17 AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.0") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") elseif (RAPIDJSON_BUILD_CXX20 AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.0") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++20") endif() if (RAPIDJSON_BUILD_ASAN) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.8.0") message(FATAL_ERROR "GCC < 4.8 doesn't support the address sanitizer") else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address") endif() endif() if (RAPIDJSON_BUILD_UBSAN) if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.9.0") message(FATAL_ERROR "GCC < 4.9 doesn't support the undefined behavior sanitizer") else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined") endif() endif() elseif (CMAKE_CXX_COMPILER_ID MATCHES "Clang") if(NOT CMAKE_CROSSCOMPILING) if(CMAKE_SYSTEM_PROCESSOR STREQUAL "powerpc" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64" OR CMAKE_SYSTEM_PROCESSOR STREQUAL "ppc64le") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mcpu=native") else() #FIXME: x86 is -march=native, but doesn't mean 
every arch is this option. To keep original project's compatibility, I leave this except POWER. set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=native") endif() endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Werror -Wno-missing-field-initializers") set(EXTRA_CXX_FLAGS -Weffc++ -Wswitch-default -Wfloat-equal -Wconversion -Wimplicit-fallthrough) if (RAPIDJSON_BUILD_CXX11 AND CMAKE_VERSION VERSION_LESS 3.1) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") elseif (RAPIDJSON_BUILD_CXX17 AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "4.0") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17") elseif (RAPIDJSON_BUILD_CXX20 AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "10.0") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++20") endif() if (RAPIDJSON_BUILD_ASAN) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=address") endif() if (RAPIDJSON_BUILD_UBSAN) if (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined-trap -fsanitize-undefined-trap-on-error") else() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined") endif() endif() elseif (CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") add_definitions(-D_CRT_SECURE_NO_WARNINGS=1) add_definitions(-DNOMINMAX) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc") # CMake >= 3.10 should handle the above CMAKE_CXX_STANDARD fine, otherwise use /std:c++XX with MSVC >= 19.10 if (RAPIDJSON_BUILD_CXX11 AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "19.10") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++11") elseif (RAPIDJSON_BUILD_CXX17 AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "19.14") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++17") elseif (RAPIDJSON_BUILD_CXX20 AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "19.29") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++20") endif() # Always compile with /WX if(CMAKE_CXX_FLAGS MATCHES "/WX-") string(REGEX REPLACE "/WX-" "/WX" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}") else() set(CMAKE_CXX_FLAGS 
"${CMAKE_CXX_FLAGS} /WX") endif() elseif (CMAKE_CXX_COMPILER_ID MATCHES "XL") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -qarch=auto") endif() #add extra search paths for libraries and includes SET(INCLUDE_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/include" CACHE PATH "The directory the headers are installed in") SET(LIB_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/lib" CACHE STRING "Directory where lib will install") SET(DOC_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/share/doc/${PROJECT_NAME}" CACHE PATH "Path to the documentation") IF(UNIX OR CYGWIN) SET(_CMAKE_INSTALL_DIR "${LIB_INSTALL_DIR}/cmake/${PROJECT_NAME}") ELSEIF(WIN32) SET(_CMAKE_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/cmake") ENDIF() SET(CMAKE_INSTALL_DIR "${_CMAKE_INSTALL_DIR}" CACHE PATH "The directory cmake files are installed in") include_directories(${CMAKE_CURRENT_SOURCE_DIR}/include) if(RAPIDJSON_BUILD_DOC) add_subdirectory(doc) endif() add_custom_target(travis_doc) add_custom_command(TARGET travis_doc COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/travis-doxygen.sh) if(RAPIDJSON_BUILD_EXAMPLES) add_subdirectory(example) endif() if(RAPIDJSON_BUILD_TESTS) if(MSVC11) # required for VS2012 due to missing support for variadic templates add_definitions(-D_VARIADIC_MAX=10) endif(MSVC11) add_subdirectory(test) include(CTest) endif() # pkg-config IF (UNIX OR CYGWIN) CONFIGURE_FILE (${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}.pc.in ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc @ONLY) INSTALL (FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}.pc DESTINATION "${LIB_INSTALL_DIR}/pkgconfig" COMPONENT pkgconfig) ENDIF() install(FILES readme.md DESTINATION "${DOC_INSTALL_DIR}" COMPONENT doc) # Add an interface target to export it add_library(RapidJSON INTERFACE) target_include_directories(RapidJSON INTERFACE $) install(DIRECTORY include/rapidjson DESTINATION "${INCLUDE_INSTALL_DIR}" COMPONENT dev) install(DIRECTORY example/ DESTINATION "${DOC_INSTALL_DIR}/examples" COMPONENT examples # Following patterns are for excluding the 
intermediate/object files # from an install of in-source CMake build. PATTERN "CMakeFiles" EXCLUDE PATTERN "Makefile" EXCLUDE PATTERN "cmake_install.cmake" EXCLUDE) # Provide config and version files to be used by other applications # =============================== ################################################################################ # Export package for use from the build tree EXPORT( PACKAGE ${PROJECT_NAME} ) # Create the RapidJSONConfig.cmake file for other cmake projects. # ... for the build tree SET( CONFIG_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) SET( CONFIG_DIR ${CMAKE_CURRENT_BINARY_DIR}) SET( ${PROJECT_NAME}_INCLUDE_DIR "\${${PROJECT_NAME}_SOURCE_DIR}/include" ) INCLUDE(CMakePackageConfigHelpers) CONFIGURE_FILE( ${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}Config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake @ONLY ) CONFIGURE_FILE(${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}ConfigVersion.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake @ONLY) # ... 
for the install tree SET( CMAKECONFIG_INSTALL_DIR ${LIB_INSTALL_DIR}/cmake/${PROJECT_NAME} ) FILE( RELATIVE_PATH REL_INCLUDE_DIR "${CMAKECONFIG_INSTALL_DIR}" "${CMAKE_INSTALL_PREFIX}/include" ) SET( ${PROJECT_NAME}_INCLUDE_DIR "\${${PROJECT_NAME}_CMAKE_DIR}/${REL_INCLUDE_DIR}" ) SET( CONFIG_SOURCE_DIR ) SET( CONFIG_DIR ) CONFIGURE_FILE( ${CMAKE_CURRENT_SOURCE_DIR}/${PROJECT_NAME}Config.cmake.in ${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${PROJECT_NAME}Config.cmake @ONLY ) INSTALL(FILES "${CMAKE_CURRENT_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/${PROJECT_NAME}Config.cmake" DESTINATION ${CMAKECONFIG_INSTALL_DIR} ) # Install files IF(CMAKE_INSTALL_DIR) INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Config.cmake ${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake DESTINATION "${CMAKE_INSTALL_DIR}" COMPONENT dev) INSTALL(TARGETS RapidJSON EXPORT RapidJSON-targets) INSTALL(EXPORT RapidJSON-targets DESTINATION ${CMAKE_INSTALL_DIR}) ENDIF() ================================================ FILE: C/thirdparty/rapidjson/CMakeModules/FindGTestSrc.cmake ================================================ SET(GTEST_SEARCH_PATH "${GTEST_SOURCE_DIR}" "${CMAKE_CURRENT_LIST_DIR}/../thirdparty/gtest/googletest") IF(UNIX) IF(RAPIDJSON_BUILD_THIRDPARTY_GTEST) LIST(APPEND GTEST_SEARCH_PATH "/usr/src/gtest") ELSE() LIST(INSERT GTEST_SEARCH_PATH 1 "/usr/src/gtest") ENDIF() ENDIF() FIND_PATH(GTEST_SOURCE_DIR NAMES CMakeLists.txt src/gtest_main.cc PATHS ${GTEST_SEARCH_PATH}) # Debian installs gtest include directory in /usr/include, thus need to look # for include directory separately from source directory. 
FIND_PATH(GTEST_INCLUDE_DIR NAMES gtest/gtest.h PATH_SUFFIXES include HINTS ${GTEST_SOURCE_DIR} PATHS ${GTEST_SEARCH_PATH}) INCLUDE(FindPackageHandleStandardArgs) find_package_handle_standard_args(GTestSrc DEFAULT_MSG GTEST_SOURCE_DIR GTEST_INCLUDE_DIR) ================================================ FILE: C/thirdparty/rapidjson/RapidJSON.pc.in ================================================ includedir=@INCLUDE_INSTALL_DIR@ Name: @PROJECT_NAME@ Description: A fast JSON parser/generator for C++ with both SAX/DOM style API Version: @LIB_VERSION_STRING@ URL: https://github.com/Tencent/rapidjson Cflags: -I${includedir} ================================================ FILE: C/thirdparty/rapidjson/RapidJSONConfig.cmake.in ================================================ @PACKAGE_INIT@ include ("${CMAKE_CURRENT_LIST_DIR}/RapidJSON-targets.cmake") ################################################################################ # RapidJSON source dir set( RapidJSON_SOURCE_DIR "@CONFIG_SOURCE_DIR@") ################################################################################ # RapidJSON build dir set( RapidJSON_DIR "@CONFIG_DIR@") ################################################################################ # Compute paths get_filename_component(RapidJSON_CMAKE_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) get_target_property(RapidJSON_INCLUDE_DIR RapidJSON INTERFACE_INCLUDE_DIRECTORIES) set( RapidJSON_INCLUDE_DIRS ${RapidJSON_INCLUDE_DIR} ) ================================================ FILE: C/thirdparty/rapidjson/RapidJSONConfigVersion.cmake.in ================================================ SET(PACKAGE_VERSION "@LIB_VERSION_STRING@") IF (PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) SET(PACKAGE_VERSION_EXACT "true") ENDIF (PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION) IF (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION) SET(PACKAGE_VERSION_COMPATIBLE "true") ELSE (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION) 
SET(PACKAGE_VERSION_UNSUITABLE "true") ENDIF (NOT PACKAGE_FIND_VERSION VERSION_GREATER PACKAGE_VERSION) ================================================ FILE: C/thirdparty/rapidjson/appveyor.yml ================================================ version: 1.1.0.{build} configuration: - Debug - Release environment: matrix: # - VS_VERSION: 9 2008 # VS_PLATFORM: win32 # - VS_VERSION: 9 2008 # VS_PLATFORM: x64 - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013 VS_VERSION: 10 2010 VS_PLATFORM: win32 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: OFF - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013 VS_VERSION: 10 2010 VS_PLATFORM: x64 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: ON - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013 VS_VERSION: 11 2012 VS_PLATFORM: win32 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: ON - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013 VS_VERSION: 11 2012 VS_PLATFORM: x64 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: OFF - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013 VS_VERSION: 12 2013 VS_PLATFORM: win32 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: OFF - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2013 VS_VERSION: 12 2013 VS_PLATFORM: x64 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: ON - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 VS_VERSION: 14 2015 VS_PLATFORM: win32 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: ON - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015 VS_VERSION: 14 2015 VS_PLATFORM: x64 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: OFF - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 VS_VERSION: 15 2017 VS_PLATFORM: win32 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: OFF - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 VS_VERSION: 15 2017 VS_PLATFORM: x64 CXX11: OFF CXX17: OFF CXX20: OFF MEMBERSMAP: ON - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 VS_VERSION: 15 2017 VS_PLATFORM: x64 CXX11: ON CXX17: OFF CXX20: OFF MEMBERSMAP: OFF - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 VS_VERSION: 15 2017 VS_PLATFORM: x64 
CXX11: OFF CXX17: ON CXX20: OFF MEMBERSMAP: OFF - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019 VS_VERSION: 16 2019 VS_PLATFORM: x64 CXX11: OFF CXX17: ON CXX20: OFF MEMBERSMAP: ON before_build: - git submodule update --init --recursive - cmake -H. -BBuild/VS -G "Visual Studio %VS_VERSION%" -DCMAKE_GENERATOR_PLATFORM=%VS_PLATFORM% -DCMAKE_VERBOSE_MAKEFILE=ON -DBUILD_SHARED_LIBS=true -DRAPIDJSON_BUILD_CXX11=%CXX11% -DRAPIDJSON_BUILD_CXX17=%CXX17% -DRAPIDJSON_BUILD_CXX20=%CXX20% -DRAPIDJSON_USE_MEMBERSMAP=%MEMBERSMAP% -Wno-dev build: project: Build\VS\RapidJSON.sln parallel: true verbosity: minimal test_script: - cd Build\VS && if %CONFIGURATION%==Debug (ctest --verbose -E perftest --build-config %CONFIGURATION%) else (ctest --verbose --build-config %CONFIGURATION%) ================================================ FILE: C/thirdparty/rapidjson/bin/data/glossary.json ================================================ { "glossary": { "title": "example glossary", "GlossDiv": { "title": "S", "GlossList": { "GlossEntry": { "ID": "SGML", "SortAs": "SGML", "GlossTerm": "Standard Generalized Markup Language", "Acronym": "SGML", "Abbrev": "ISO 8879:1986", "GlossDef": { "para": "A meta-markup language, used to create markup languages such as DocBook.", "GlossSeeAlso": ["GML", "XML"] }, "GlossSee": "markup" } } } } } ================================================ FILE: C/thirdparty/rapidjson/bin/data/menu.json ================================================ {"menu": { "header": "SVG Viewer", "items": [ {"id": "Open"}, {"id": "OpenNew", "label": "Open New"}, null, {"id": "ZoomIn", "label": "Zoom In"}, {"id": "ZoomOut", "label": "Zoom Out"}, {"id": "OriginalView", "label": "Original View"}, null, {"id": "Quality"}, {"id": "Pause"}, {"id": "Mute"}, null, {"id": "Find", "label": "Find..."}, {"id": "FindAgain", "label": "Find Again"}, {"id": "Copy"}, {"id": "CopyAgain", "label": "Copy Again"}, {"id": "CopySVG", "label": "Copy SVG"}, {"id": "ViewSVG", "label": "View SVG"}, {"id": 
"ViewSource", "label": "View Source"}, {"id": "SaveAs", "label": "Save As"}, null, {"id": "Help"}, {"id": "About", "label": "About Adobe CVG Viewer..."} ] }} ================================================ FILE: C/thirdparty/rapidjson/bin/data/readme.txt ================================================ sample.json is obtained from http://code.google.com/p/json-test-suite/downloads/detail?name=sample.zip ================================================ FILE: C/thirdparty/rapidjson/bin/data/sample.json ================================================ { "a": { "6U閆崬밺뀫颒myj츥휘:$薈mY햚#rz飏+玭V㭢뾿愴YꖚX亥ᮉ푊\u0006垡㐭룝\"厓ᔧḅ^Sqpv媫\"⤽걒\"˽Ἆ?ꇆ䬔未tv{DV鯀Tἆl凸g\\㈭ĭ즿UH㽤": null, "b茤z\\.N": [[ "ZL:ᅣዎ*Y|猫劁櫕荾Oj为1糕쪥泏S룂w࡛Ᏺ⸥蚙)", { "\"䬰ỐwD捾V`邀⠕VD㺝sH6[칑.:醥葹*뻵倻aD\"": true, "e浱up蔽Cr෠JK軵xCʨ<뜡癙Y獩ケ齈X/螗唻?<蘡+뷄㩤쳖3偑犾&\\첊xz坍崦ݻ鍴\"嵥B3㰃詤豺嚼aqJ⑆∥韼@\u000b㢊\u0015L臯.샥": false, "l?Ǩ喳e6㔡$M꼄I,(3᝝縢,䊀疅뉲B㴔傳䂴\u0088㮰钘ꜵ!ᅛ韽>": -5514085325291784739, "o㮚?\"춛㵉<\/﬊ࠃ䃪䝣wp6ἀ䱄[s*S嬈貒pᛥ㰉'돀": [{ "(QP윤懊FI<ꃣ『䕷[\"珒嶮?%Ḭ壍಻䇟0荤!藲끹bd浶tl\u2049#쯀@僞": {"i妾8홫": { ",M맃䞛K5nAㆴVN㒊햬$n꩑&ꎝ椞阫?/ṏ세뉪1x쥼㻤㪙`\"$쟒薟B煌܀쨝ଢ଼2掳7㙟鴙X婢\u0002": "Vዉ菈᧷⦌kﮞఈnz*﷜FM\"荭7ꍀ-VR<\/';䁙E9$䩉\f @s?퍪o3^衴cඎ䧪aK鼟q䆨c{䳠5mᒲՙ蘹ᮩ": { "F㲷JGo⯍P덵x뒳p䘧☔\"+ꨲ吿JfR㔹)4n紬G练Q፞!C|": true, "p^㫮솎oc.೚A㤠??r\u000f)⾽⌲們M2.䴘䩳:⫭胃\\፾@Fᭌ\\K": false, "蟌Tk愙潦伩": { "a<\/@ᾛ慂侇瘎": -7271305752851720826, "艓藬/>၄ṯ,XW~㲆w": {"E痧郶)㜓ha朗!N赻瞉駠uC\u20ad辠x퓮⣫P1ࠫLMMX'M刼唳됤": null, "P쓫晥%k覛ዩIUᇸ滨:噐혲lMR5䋈V梗>%幽u頖\\)쟟": null, "eg+昉~矠䧞难\b?gQ쭷筝\\eꮠNl{ಢ哭|]Mn銌╥zꖘzⱷ⭤ᮜ^": [ -1.30142114406914976E17, -1.7555215491128452E-19, null, "渾㨝ߏ牄귛r?돌?w[⚞ӻ~廩輫㼧/", -4.5737191805302129E18, null, "xy࿑M[oc셒竓Ⓔx?뜓y䊦>-D켍(&&?XKkc꩖ﺸᏋ뵞K伕6ী)딀P朁yW揙?훻魢傎EG碸9類៌g踲C⟌aEX舲:z꒸许", 3808159498143417627, null, {"m試\u20df1{G8&뚈h홯J<\/": { "3ஸ厠zs#1K7:rᥞoꅔꯧ&띇鵼鞫6跜#赿5l'8{7㕳(b/j\"厢aq籀ꏚ\u0015厼稥": [ -2226135764510113982, true, null, { "h%'맞S싅Hs&dl슾W0j鿏MםD놯L~S-㇡R쭬%": null, "⟓咔謡칲\u0000孺ꛭx旑檉㶆?": null, "恇I転;￸B2Y`z\\獓w,놏濐撐埵䂄)!䶢D=ഭ㴟jyY": { "$ࡘt厛毣ൢI芁<겿骫⫦6tr惺a": [ 6.385779736989334E-20, false, true, true, [ -6.891946211462334E-19, null, { 
"]-\\Ꟑ1/薓❧Ὂ\\l牑\u0007A郃)阜ᇒᓌ-塯`W峬G}SDb㬨Q臉⮻빌O鞟톴첂B㺱<ƈmu챑J㴹㷳픷Oㆩs": { "\"◉B\"pᶉt骔J꩸ᄇᛐi╰栛K쉷㉯鐩!㈐n칍䟅難>盥y铿e୔蒏M貹ヅ8嘋퀯䉶ጥ㏢殊뻳\"絧╿ꉑ䠥?∃蓊{}㣣Gk긔H1哵峱": false, "6.瀫cN䇮F㧺?\\椯=ڈT䘆4␘8qv": -3.5687501019676885E-19, "Q?yऴr혴{஀䳘p惭f1ﹸ䅷䕋贲<ྃᄊ繲hq\\b|#QSTs1c-7(䵢\u2069匏絘ꯉ:l毴汞t戀oෟᵶ뮱፣-醇Jx䙬䐁햢0࣫ᡁgrㄛ": "\u0011_xM/蘇Chv;dhA5.嗀绱V爤ﰦi뵲M", "⏑[\"ugoy^儣횎~U\\섯겜論l2jw஌yD腅̂\u0019": true, "ⵯɇ䐲᫿࢚!㯢l샅笶戮1꣖0Xe": null, "劅f넀識b宁焊E찓橵G!ʱ獓뭔雩괛": [{"p⹣켙[q>燣䍃㞽ᩲx:쓤삘7玑퇼0<\/q璂ᑁ[Z\\3䅵䧳\u0011㤧|妱緒C['췓Yꞟ3Z鳱雼P錻BU씧U`ᢶg蓱>.1ӧ譫'L_5V䏵Ц": [ false, false, {"22䂍盥N霂얢躰e9⑩_뵜斌n@B}$괻Yᐱ@䧋V\"☒-諯cV돯ʠ": true, "Ű螧ᔼ檍鍎땒딜qꄃH뜣<獧ूCY吓⸏>XQ㵡趌o끬k픀빯a(ܵ甏끆୯/6Nᪧ}搚ᆚ짌P牰泱鈷^d꣟#L삀\"㕹襻;k㸊\\f+": true, "쎣\",|⫝̸阊x庿k잣v庅$鈏괎炔k쬪O_": [ "잩AzZGz3v愠ꉈⵎ?㊱}S尳௏p\r2>췝IP䘈M)w|\u000eE", -9222726055990423201, null, [ false, {"´킮'뮤쯽Wx讐V,6ᩪ1紲aႈ\u205czD": [ -930994432421097536, 3157232031581030121, "l貚PY䃛5@䭄귻m㎮琸f": 1.0318894506812084E-19, "࢜⩢Ш䧔1肽씮+༎ᣰ闺馺窃䕨8Mƶq腽xc(៯夐J5굄䕁Qj_훨/~価.䢵慯틠퇱豠㼇Qﵘ$DuSp(8Uญ<\/ಟ룴𥳐ݩ$": 8350772684161555590, "ㆎQ䄾\u001bpᩭ${[諟^^骴᤮b^ㅥI┧T㉇⾞\"绦r䰂f矩'-7䡭桥Dz兔V9谶居㺍ᔊ䩯덲.\u001eL0ὅㅷ釣": [{ "<쯬J卷^숞u࠯䌗艞R9닪g㐾볎a䂈歖意:%鐔|ﵤ|y}>;2,覂⶚啵tb*仛8乒㓶B࿠㯉戩oX 貘5V嗆렽낁߼4h䧛ꍺM空\\b꿋貼": 8478577078537189402, "VD*|吝z~h譺aᯒ": { "YI췢K<\/濳xNne玗rJo쾘3핰鴊\"↱AR:ࢷ\"9?\"臁說)?誚ꊏe)_D翾W?&F6J@뺾ꍰNZ醊Z쾈വH嶿?炫㷱鬰M겈᭨b,⻁鈵P䕡䀠८ⱄ홎鄣": { "@?k2鶖㋮\"Oರ K㨇廪儲\u0017䍾J?);\b*묀㗠섳햭1MC V": null, "UIICP!BUA`ᢈ㋸~袩㗪⾒=fB﮴l1ꡛ죘R辂여ҳ7쮡<䩲`熕8頁": 4481809488267626463, "Y?+8먙ᚔ鋳蜩럶1㥔y璜౩`": [ null, 1.2850335807501874E-19, "~V2", 2035406654801997866, { "<숻1>\"": -8062468865199390827, "M㿣E]}qwG莎Gn᝶(ꔙ\\D⬲iꇲs寢t駇S뀡ꢜ": false, "pꝤ㎏9W%>M;-U璏f(^j1?&RB隧 忓b똊E": "#G?C8.躬ꥯ'?냪#< 渟&헿란zpo왓Kj}鷧XﻘMツb䕖;㪻", "vE풤幉xz뱕쫥Ug㦲aH} ᣟp:鬼YᰟH3镔ᴚ斦\\鏑r*2橱G⼔F/.j": true, "RK좬뎂a홠f*f㱉ᮍ⦋潙㨋Gu곌SGI3I뿐\\F',)t`荁蘯囯ﮉ裲뇟쥼_ገ驪▵撏ᕤV": 1.52738225997956557E18, "^k굲䪿꠹B逤%F㱢漥O披M㽯镞竇霒i꼂焅륓\u00059=皫之눃\u2047娤閍銤唫ၕb<\/w踲䔼u솆맚,䝒ᝳ'/it": "B餹饴is権ꖪ怯ꦂẉဎt\"!凢谵⧿0\\<=(uL䷍刨쑪>俆揓Cy襸Q힆䆭涷<\/ᐱ0ɧ䗾䚹\\ኜ?ꄢᇘ`䴢{囇}᠈䴥X4퓪檄]ꥷ/3謒ሴn+g騍X", "GgG꽬[(嫓몍6\u0004궍宩㙻/>\u0011^辍dT腪hxǑ%ꊇk,8(W⧂結P鬜O": [{ "M㴾c>\\ᓲ\u0019V{>ꤩ혙넪㭪躂TS-痴໸闓⍵/徯O.M㏥ʷD囎⧔쁳휤T??鉬뇙=#ꢫ숣BX䭼<\/d똬졬g榿)eꨋﯪ좇첻\u001a\u0011\";~쓆BH4坋攊7힪", 
"iT:L闞椕윚*滛gI≀Wਟඊ'ꢆ縺뱹鮚Nꩁ᧬蕼21줧\\䋯``⍐\\㏱鳨": 1927052677739832894, "쮁缦腃g]礿Y㬙 fヺSɪ꾾N㞈": [ null, null, { "!t,灝Y 1䗉罵?c饃호䉂Cᐭ쒘z(즽sZG㬣sഖE4뢜㓕䏞丮Qp簍6EZឪ겛fx'ꩱQ0罣i{k锩*㤴㯞r迎jTⲤ渔m炅肳": [ -3.3325685522591933E18, [{"㓁5]A䢕1룥BC?Ꙍ`r룔Ⳛ䙡u伲+\u0001്o": [ null, 4975309147809803991, null, null, {"T팘8Dﯲ稟MM☻㧚䥧/8ﻥ⥯aXLaH\"顾S☟耲ît7fS෉놁뮔/ꕼ䓈쁺4\\霶䠴ᩢ<\/t4?죵>uD5➶༆쉌럮⢀秙䘥\u20972ETR3濡恆vB? ~鸆\u0005": { "`閖m璝㥉b뜴?Wf;?DV콜\u2020퍉౓擝宏ZMj3mJ먡-傷뱙yח㸷꥿ ໘u=M읝!5吭L4v\\?ǎ7C홫": null, "|": false, "~Ztᛋ䚘\\擭㗝傪W陖+㗶qᵿ蘥ᙄp%䫎)}=⠔6ᮢS湟-螾-mXH?cp": 448751162044282216, "\u209fad놹j檋䇌ᶾ梕㉝bוּ": {"?苴ꩠD䋓帘5騱qﱖPF?☸珗顒yU ᡫcb䫎 S@㥚gꮒ쎘泴멖\\:I鮱TZ듒ᶨQ3+f7캙\"?\f풾\\o杞紟﻽M.⏎靑OP": [ -2.6990368911551596E18, [{"䒖@<᰿<\/⽬tTr腞&G%᳊秩蜰擻f㎳?S㵧\r*k뎾-乢겹隷j軛겷0룁鮁": {")DO0腦:춍逿:1㥨่!蛍樋2": [{ ",ꌣf侴笾m๫ꆽ?1?U?\u0011ꌈꂇ": { "x捗甠nVq䅦w`CD⦂惺嘴0I#vỵ} \\귂S끴D얾?Ԓj溯\"v餄a": { "@翙c⢃趚痋i\u0015OQ⍝lq돆Y0pࢥ3쉨䜩^<8g懥0w)]䊑n洺o5쭝QL댊랖L镈Qnt⪟㒅십q헎鳒⮤眉ᔹ梠@O縠u泌ㄘb榚癸XޔFtj;iC": false, "I&뱋゘|蓔䔕측瓯%6ᗻHW\\N1貇#?僐ᗜgh᭪o'䗈꽹Rc욏/蔳迄༝!0邔䨷푪8疩)[쭶緄㇈୧ፐ": { "B+:ꉰ`s쾭)빼C羍A䫊pMgjdx䐝Hf9᥸W0!C樃'蘿f䫤סи\u0017Jve? 覝f둀⬣퓉Whk\"஼=չﳐ皆笁BIW虨쫓F廰饞": -642906201042308791, "sb,XcZ<\/m㉹ ;䑷@c䵀s奤⬷7`ꘖ蕘戚?Feb#輜}p4nH⬮eKL트}": [ "RK鳗z=袤Pf|[,u욺", "Ẏᏻ罯뉋⺖锅젯㷻{H䰞쬙-쩓D]~\u0013O㳢gb@揶蔉|kᦂ❗!\u001ebM褐sca쨜襒y⺉룓", null, null, true, -1.650777344339075E-19, false, "☑lꄆs힨꤇]'uTന⌳농].1⋔괁沰\"IWഩ\u0019氜8쟇䔻;3衲恋,窌z펏喁횗?4?C넁问?ᥙ橭{稻Ⴗ_썔", "n?]讇빽嗁}1孅9#ꭨ靶v\u0014喈)vw祔}룼쮿I", -2.7033457331882025E18, { ";⚃^㱋x:饬ኡj'꧵T☽O㔬RO婎?향ᒭ搩$渣y4i;(Q>꿘e8q": "j~錘}0g;L萺*;ᕭꄮ0l潛烢5H▄쳂ꏒוֹꙶT犘≫x閦웧v", "~揯\u2018c4職렁E~ᑅቚꈂ?nq뎤.:慹`F햘+%鉎O瀜쟏敛菮⍌浢<\/㮺紿P鳆ࠉ8I-o?#jﮨ7v3Dt赻J9": null, "ࣝW䌈0ꍎqC逖,횅c၃swj;jJS櫍5槗OaB>D踾Y": {"㒰䵝F%?59.㍈cᕨ흕틎ḏ㋩B=9IېⓌ{:9.yw}呰ㆮ肒᎒tI㾴62\"ዃ抡C﹬B<\/촋jo朣", [ -7675533242647793366, {"ᙧ呃:[㒺쳀쌡쏂H稈㢤\u001dᶗGG-{GHྻຊꡃ哸䵬;$?&d\\⥬こN圴됤挨-'ꕮ$PU%?冕눖i魁q騎Q": [ false, [[ 7929823049157504248, [[ true, "Z菙\u0017'eꕤ᱕l,0\\X\u001c[=雿8蠬L<\/낲긯W99g톉4ퟋb㝺\u0007劁'!麕Q궈oW:@X၎z蘻m絙璩귓죉+3柚怫tS捇蒣䝠-擶D[0=퉿8)q0ٟ", "唉\nFA椭穒巯\\䥴䅺鿤S#b迅獘 ﶗ꬘\\?q1qN犠pX꜅^䤊⛤㢌[⬛휖岺q唻ⳡ틍\"㙙Eh@oA賑㗠y必Nꊑᗘ", -2154220236962890773, -3.2442003245397908E18, "Wᄿ筠:瘫퀩?o貸q⊻(᎞KWf宛尨h^残3[U(='橄", -7857990034281549164, 1.44283696979059942E18, null, {"ꫯAw跭喀 
?_9\"Aty背F=9缉ྦྷ@;?^鞀w:uN㘢Rỏ": [ 7.393662029337442E15, 3564680942654233068, [ false, -5253931502642112194, "煉\\辎ೆ罍5⒭1䪁䃑s䎢:[e5}峳ﴱn騎3?腳Hyꏃ膼N潭錖,Yᝋ˜YAၓ㬠bG렣䰣:", true, null, { "⒛'P&%죮|:⫶춞": -3818336746965687085, "钖m<\/0ݎMtF2Pk=瓰୮洽겎.": [[ -8757574841556350607, -3045234949333270161, null, { "Ꮬr輳>⫇9hU##w@귪A\\C 鋺㘓ꖐ梒뒬묹㹻+郸嬏윤'+g<\/碴,}ꙫ>손;情d齆J䬁ຩ撛챝탹/R澡7剌tꤼ?ặ!`⏲睤\u00002똥଴⟏": null, "\u20f2ܹe\\tAꥍư\\x当뿖렉禛;G檳ﯪS૰3~㘠#[J<}{奲 5箉⨔{놁<\/釿抋,嚠/曳m&WaOvT赋皺璑텁": [[ false, null, true, -5.7131445659795661E18, "萭m䓪D5|3婁ఞ>蠇晼6nﴺPp禽羱DS<睓닫屚삏姿", true, [ -8759747687917306831, { ">ⓛ\t,odKr{䘠?b퓸C嶈=DyEᙬ@ᴔ쨺芛髿UT퓻春<\/yꏸ>豚W釺N뜨^?꽴﨟5殺ᗃ翐%>퍂ဿ䄸沂Ea;A_\u0005閹殀W+窊?Ꭼd\u0013P汴G5썓揘": 4.342729067882445E-18, "Q^즾眆@AN\u0011Kb榰냎Y#䝀ꀒᳺ'q暇睵s\"!3#I⊆畼寤@HxJ9": false, "⿾D[)袨㇩i]웪䀤ᛰMvR<蟏㣨": {"v퇓L㪱ꖣ豛톤\\곱#kDTN": [{ "(쾴䡣,寴ph(C\"㳶w\"憳2s馆E!n!&柄<\/0Pꈗſ?㿳Qd鵔": {"娇堰孹L錮h嵅⛤躏顒?CglN束+쨣ﺜ\\MrH": {"獞䎇둃ቲ弭팭^ꄞ踦涟XK錆쳞ឌ`;੶S炥騞ଋ褂B៎{ڒ䭷ᶼ靜pI荗虶K$": [{"◖S~躘蒉꫿輜譝Q㽙闐@ᢗ¥E榁iء5┄^B[絮跉ᰥ遙PWi3wㄾⵀDJ9!w㞣ᄎ{듒ꓓb6\\篴??c⼰鶹⟧\\鮇ꮇ": [[ 654120831325413520, -1.9562073916357608E-19, { "DC(昐衵ἡ긙갵姭|֛[t": 7.6979110359897907E18, "J␅))嫼❳9Xfd飉j7猬ᩉ+⤻眗벎E鰉Zᄊ63zၝ69}ZᶐL崭ᦥ⡦靚⋛ꎨ~i㨃咊ꧭo䰠阀3C(": -3.5844809362512589E17, "p꣑팱쒬ꎑ뛡Ꙩ挴恍胔&7ᔈ묒4Hd硶훐㎖zꢼ豍㿢aሃ=<\/湉鵲EӅ%$F!퍶棌孼{O駍਺geu+": ")\u001b잓kŀX쩫A밁®ڣ癦狢)扔弒p}k縕ꩋ,䃉tࣼi", "ァF肿輸<솄G-䢹䛸ꊏl`Tqꕗ蒞a氷⸅ᴉ蠰]S/{J왲m5{9.uέ~㕚㣹u>x8U讁B덺襪盎QhVS맅킃i识{벂磄Iහ䙅xZy/抍૭Z鲁-霳V据挦ℒ": null, "㯛|Nꐸb7ⵐb?拠O\u0014ކ?-(EꞨ4ꕷᄤYᯕOW瞺~螸\"욿ќe㺰\"'㌢ƐW\u0004瞕>0?V鷵엳": true, "뤥G\\迋䠿[庩'꼡\u001aiᩮV쯁ᳪ䦪Ô;倱ନ뛁誈": null, "쥹䄆䚟Q榁䎐᢭<\/2㕣p}HW蟔|䃏꿈ꚉ锳2Pb7㙑Tⅹᵅ": { "Y?֭$>#cVBꩨ:>eL蒁務": { "86柡0po 䏚&-捑Ћ祌<\/휃-G*㶢הּ쩍s㶟餇c걺yu꽎還5*턧簕Og婥SꝐ": null, "a+葞h٥ࠆ裈嗫ﵢ5輙퀟ᛜ,QDﹼ⟶Y騠锪E_|x죗j侵;m蜫轘趥?븅w5+mi콛L": { ";⯭ﱢ!买F⽍柤鶂n䵣V㫚墱2렾ELEl⣆": [ true, -3.6479311868339015E-18, -7270785619461995400, 3.334081886177621E18, 2.581457786298155E18, -6.605252412954115E-20, -3.9232347037744167E-20, { "B6㊕.k1": null, "ZAꄮJ鮷ᳱo갘硥鈠䠒츼": { "ᕅ}럡}.@y陪鶁r業'援퀉x䉴ﵴl퍘):씭脴ᥞhiꃰblﲂ䡲엕8߇M㶭0燋標挝-?PCwe⾕J碻Ᾱ䬈䈥뷰憵賣뵓痬+": {"a췩v礗X⋈耓ፊf罅靮!㔽YYᣓw澍33⎔芲F|\"䜏T↮輦挑6ᓘL侘?ᅥ]덆1R௯✎餘6ꏽ<\/௨\\?q喷ꁫj~@ulq": {"嗫欆뾔Xꆹ4H㌋F嵧]ࠎ]㠖1ꞤT<$m뫏O i댳0䲝i": 
{"?෩?\u20cd슮|ꯆjs{?d7?eNs⢚嫥氂䡮쎱:鑵롟2hJꎒﯭ鱢3춲亄:뼣v䊭諱Yj択cVmR䩃㘬T\"N홝*ै%x^F\\_s9보zz4淗?q": [ null, "?", 2941869570821073737, "{5{殇0䝾g6밖퍋臩綹R$䖭j紋釰7sXI繳漪행y", false, "aH磂?뛡#惇d婅?Fe,쐘+늵䍘\"3r瘆唊勐j⳧࠴ꇓ<\/唕윈x⬌讣䋵%拗ᛆⰿ妴᝔M2㳗必꧂淲?ゥ젯檢<8끒MidX䏒3᳻Q▮佐UT|⤪봦靏⊏", [[{ "颉(&뜸귙{y^\"P퟉춝Ჟ䮭D顡9=?}Y誱<$b뱣RvO8cH煉@tk~4ǂ⤧⩝屋SS;J{vV#剤餓ᯅc?#a6D,s": [ -7.8781018564821536E16, true, [ -2.28770899315832371E18, false, -1.0863912140143876E-20, -6282721572097446995, 6767121921199223078, -2545487755405567831, false, null, -9065970397975641765, [ -5.928721243413937E-20, {"6촊\u001a홯kB0w撨燠룉{绎6⳹!턍贑y▾鱧ժ[;7ᨷ∀*땒䪮1x霆Hᩭ☔\"r䝐7毟ᝰr惃3ꉭE+>僒澐": [ "Ta쎩aƝt쵯ⰪVb", [ -5222472249213580702, null, -2851641861541559595, null, 4808804630502809099, 5657671602244269874, "5犲﨣4mᥣ?yf젫꾯|䋬잁$`Iⳉﴷ扳兝,'c", false, [ null, { "DyUIN쎾M仼惀⮥裎岶泭lh扠\u001e礼.tEC癯튻@_Qd4c5S熯A<\/\6U윲蹴Q=%푫汹\\\u20614b[௒C⒥Xe⊇囙b,服3ss땊뢍i~逇PA쇸1": -2.63273619193485312E17, "Mq꺋貘k휕=nK硍뫞輩>㾆~἞ࡹ긐榵l⋙Hw뮢帋M엳뢯v⅃^": 1877913476688465125, "ᶴ뻗`~筗免⚽টW˃⽝b犳䓺Iz篤p;乨A\u20ef쩏?疊m㝀컩뫡b탔鄃ᾈV(遢珳=뎲ିeF仢䆡谨8t0醄7㭧瘵⻰컆r厡궥d)a阄፷Ed&c﯄伮1p": null, "⯁w4曢\"(欷輡": "\"M᭫]䣒頳B\\燧ࠃN㡇j姈g⊸⺌忉ꡥF矉স%^", "㣡Oᄦ昵⫮Y祎S쐐級㭻撥>{I$": -378474210562741663, "䛒掷留Q%쓗1*1J*끓헩ᦢ﫫哉쩧EↅIcꅡ\\?ⴊl귛顮4": false, "寔愆샠5]䗄IH贈=d﯊/偶?ॊn%晥D視N򗘈'᫂⚦|X쵩넽z질tskxDQ莮Aoﱻ뛓": true, "钣xp?&\u001e侉/y䴼~?U篔蘚缣/I畚?Q绊": -3034854258736382234, "꺲໣眀)⿷J暘pИfAV삕쳭Nꯗ4々'唄ⶑ伻㷯騑倭D*Ok꧁3b␽_<\/챣Xm톰ၕ䆄`*fl㭀暮滠毡?": [ "D男p`V뙸擨忝븪9c麺`淂⢦Yw⡢+kzܖ\fY1䬡H歁)벾Z♤溊-혰셢?1<-\u0005;搢Tᐁle\\ᛵߓﭩ榩訝-xJ;巡8깊蠝ﻓU$K": { "Vꕡ諅搓W=斸s︪vﲜ츧$)iꡟ싉e寳?ጭムVથ嵬i楝Fg<\/Z|៪ꩆ-5'@ꃱ80!燱R쇤t糳]罛逇dṌ֣XHiͦ{": true, "Ya矲C멗Q9膲墅携휻c\\딶G甔<\/.齵휴": -1.1456247877031811E-19, "z#.OO￝J": -8263224695871959017, "崍_3夼ᮟ1F븍뽯ᦓ鴭V豈Ь": [{ "N蒬74": null, "yuB?厅vK笗!ᔸcXQ旦컶P-녫mᄉ麟_": "1R@ 톘xa_|﩯遘s槞d!d껀筤⬫薐焵먑D{\\6k共倌☀G~AS_D\"딟쬚뮥馲렓쓠攥WTMܭ8nX㩴䕅檹E\u0007ﭨN 2 ℆涐ꥏ꠵3▙玽|됨_\u2048", "恐A C䧩G": {":M큣5e들\\ꍀ恼ᔄ靸|I﨏$)n": { "|U䬫㟯SKV6ꛤ㗮\bn봻䲄fXT:㾯쳤'笓0b/ೢC쳖?2浓uO.䰴": "ཐ꼋e?``,ᚇ慐^8ꜙNM䂱\u0001IᖙꝧM'vKdꌊH牮r\\O@䊷ᓵ쀆(fy聻i툺\"?<\/峧ࣞ⓺ᤤ쵒߯ꎺ騬?)刦\u2072l慪y꺜ﲖTj+u", "뽫hh䈵w>1ⲏ쐭V[ⅎ\\헑벑F_㖝⠗㫇h恽;῝汰ᱼ瀖J옆9RR셏vsZ柺鶶툤r뢱橾/ꉇ囦FGm\"謗ꉦ⨶쒿⥡%]鵩#ᖣ_蹎 u5|祥?O", null, 2.0150326776036215E-19, 
null, true, false, true, {"\fa᭶P捤WWc᠟f뚉ᬏ퓗ⳀW睹5:HXH=q7x찙X$)모r뚥ᆟ!Jﳸf": [ -2995806398034583407, [ 6441377066589744683, "Mﶒ醹i)Gἦ廃s6몞 KJ౹礎VZ螺费힀\u0000冺업{谥'꡾뱻:.ꘘ굄奉攼Di᷑K鶲y繈욊阓v㻘}枭캗e矮1c?휐\"4\u0005厑莔뀾墓낝⽴洗ṹ䇃糞@b1\u0016즽Y轹", { "1⽕⌰鉟픏M㤭n⧴ỼD#%鐘⊯쿼稁븣몐紧ᅇ㓕ᛖcw嬀~ഌ㖓(0r⧦Q䑕髍ര铂㓻R儮\"@ꇱm❈௿᦯頌8}㿹犴?xn잆꥽R": 2.07321075750427366E18, "˳b18㗈䃟柵Z曆VTAu7+㛂cb0﯑Wp執<\/臋뭡뚋刼틮荋벲TLP预庰܈G\\O@VD'鱃#乖끺*鑪ꬳ?Mޞdﭹ{␇圯쇜㼞顄︖Y홡g": [{ "0a,FZ": true, "2z̬蝣ꧦ驸\u0006L↛Ḣ4๚뿀'?lcwᄧ㐮!蓚䃦-|7.飑挴.樵*+1ﮊ\u0010ꛌ%貨啺/JdM:똍!FBe?鰴㨗0O财I藻ʔWA᫓G쳛u`<\/I": [{ "$τ5V鴐a뾆両環iZp頻යn븃v": -4869131188151215571, "*즢[⦃b礞R◚nΰꕢH=귰燙[yc誘g䆌?ଜ臛": { "洤湌鲒)⟻\\䥳va}PeAMnN[": "㐳ɪ/(軆lZR,Cp殍ȮN啷\"3B婴?i=r$펽ᤐ쀸", "阄R4㒿㯔ڀ69ZᲦ2癁핌噗P崜#\\-쭍袛&鐑/$4童V꩑_ZHA澢fZ3": {"x;P{긳:G閉:9?活H": [ "繺漮6?z犞焃슳\">ỏ[Ⳛ䌜녏䂹>聵⼶煜Y桥[泥뚩MvK$4jtロ", "E#갶霠좭㦻ୗ먵F+䪀o蝒ba쮎4X㣵 h", -335836610224228782, null, null, [ "r1᫩0>danjY짿bs{", [ -9.594464059325631E-23, 1.0456894622831624E-20, null, 5.803973284253454E-20, -8141787905188892123, true, -4735305442504973382, 9.513150514479281E-20, "7넳$螔忷㶪}䪪l짴\u0007鹁P鰚HF銏ZJﳴ/⍎1ᷓ忉睇ᜋ쓈x뵠m䷐窥Ꮤ^\u0019ᶌ偭#ヂt☆၃pᎍ臶䟱5$䰵&๵分숝]䝈뉍♂坎\u0011<>", "C蒑貑藁lﰰ}X喇몛;t밿O7/᯹f\u0015kI嘦<ዴ㟮ᗎZ`GWퟩ瑹࡮ᅴB꿊칈??R校s脚", { "9珵戬+AU^洘拻ቒy柭床'粙XG鞕᠜繀伪%]hC,$輙?Ut乖Qm떚W8઼}~q⠪rU䤶CQ痗ig@#≲t샌f㈥酧l;y闥ZH斦e⸬]j⸗?ঢ拻퀆滌": null, "畯}㧢J罚帐VX㨑>1ꢶkT⿄蘥㝑o|<嗸層沈挄GEOM@-䞚䧰$만峬輏䠱V✩5宸-揂D'㗪yP掶7b⠟J㕻SfP?d}v㼂Ꮕ'猘": { "陓y잀v>╪": null, "鬿L+7:됑Y=焠U;킻䯌잫!韎ஔ\f": { "駫WmGጶ": { "\\~m6狩K": -2586304199791962143, "ႜࠀ%͑l⿅D.瑢Dk%0紪dḨTI픸%뗜☓s榗኉\"?V籄7w髄♲쟗翛歂E䤓皹t ?)ᄟ鬲鐜6C": { "_췤a圷1\u000eB-XOy缿請∎$`쳌eZ~杁튻/蜞`塣৙\"⪰\"沒l}蕌\\롃荫氌.望wZ|o!)Hn獝qg}": null, "kOSܧ䖨钨:಼鉝ꭝO醧S`십`ꓭ쭁ﯢN&Et㺪馻㍢ⅳ㢺崡ຊ蜚锫\\%ahx켨|ż劻ꎄ㢄쐟A躊᰹p譞綨Ir쿯\u0016ﵚOd럂*僨郀N*b㕷63z": { ":L5r+T㡲": [{ "VK泓돲ᮙRy㓤➙Ⱗ38oi}LJቨ7Ó㹡৘*q)1豢⛃e᫛뙪壥镇枝7G藯g㨛oI䄽 孂L缊ꋕ'EN`": -2148138481412096818, "`⛝ᘑ$(खꊲ⤖ᄁꤒ䦦3=)]Y㢌跨NĴ驳줟秠++d孳>8ᎊ떩EꡣSv룃 쯫أ?#E|᭙㎐?zv:5祉^⋑V": [ -1.4691944435285607E-19, 3.4128661569395795E17, "㐃촗^G9佭龶n募8R厞eEw⺡_ㆱ%⼨D뉄퉠2ꩵᛅⳍ搿L팹Lවn=\"慉념ᛮy>!`g!풲晴[/;?[v겁軇}⤳⤁핏∌T㽲R홓遉㓥", "愰_⮹T䓒妒閤둥?0aB@㈧g焻-#~跬x<\/舁P݄ꐡ=\\׳P\u0015jᳪᢁq;㯏l%᭗;砢觨▝,謁ꍰGy?躤O黩퍋Y㒝a擯\n7覌똟_䔡]fJ晋IAS", 4367930106786121250, -4.9421193149720582E17, null, { 
";ᄌ똾柉곟ⰺKpፇ䱻ฺ䖝{o~h!eꁿ઻욄ښ\u0002y?xUd\u207c悜ꌭ": [ 1.6010824122815255E-19, [ "宨︩9앉檥pr쇷?WxLb", "氇9】J玚\u000f옛呲~ 輠1D嬛,*mW3?n휂糊γ虻*ᴫ꾠?q凐趗Ko↦GT铮", "㶢ថmO㍔k'诔栀Z蛟}GZ钹D", false, -6.366995517736813E-20, -4894479530745302899, null, "V%᫡II璅䅛䓎풹ﱢ/pU9se되뛞x梔~C)䨧䩻蜺(g㘚R?/Ự[忓C뾠ࢤc왈邠买?嫥挤풜隊枕", ",v碍喔㌲쟚蔚톬៓ꭶ", 3.9625444752577524E-19, null, [ "kO8란뿒䱕馔b臻⍟隨\"㜮鲣Yq5m퐔K#ꢘug㼈ᝦ=P^6탲@䧔%$CqSw铜랊0&m⟭<\/a逎ym\u0013vᯗ": true, "洫`|XN뤮\u0018詞=紩鴘_sX)㯅鿻Ố싹": 7.168252736947373E-20, "ꛊ饤ﴏ袁(逊+~⽫얢鈮艬O힉7D筗S곯w操I斞᠈븘蓷x": [[[[ -7.3136069426336952E18, -2.13572396712722688E18, { "硢3㇩R:o칢行E<=\u0018ၬYuH!\u00044U%卝炼2>\u001eSi$⓷ꒈ'렢gᙫ番ꯒ㛹럥嶀澈v;葷鄕x蓎\\惩+稘UEᖸﳊ㊈壋N嫿⏾挎,袯苷ኢ\\x|3c": 7540762493381776411, "?!*^ᢏ窯?\u0001ڔꙃw虜돳FgJ?&⨫*uo籤:?}ꃹ=ٴ惨瓜Z媊@ત戹㔏똩Ԛ耦Wt轁\\枒^\\ꩵ}}}ꀣD\\]6M_⌫)H豣:36섘㑜": { ";홗ᰰU஋㙛`D왔ཿЃS회爁\u001b-㢈`봆?盂㛣듿ᦾ蒽_AD~EEຆ㊋(eNwk=Rɠ峭q\"5Ἠ婾^>'ls\n8QAK)- Q䲌mo펹L_칍樖庫9꩝쪹ᘹ䑖瀍aK ?*趤f뭓廝p=磕", "哑z懅ᤏ-ꍹux쀭", [ true, 3998739591332339511, "ጻ㙙?᳸aK<\/囩U`B3袗ﱱ?\"/k鏔䍧2l@쿎VZ쨎/6ꃭ脥|B?31+on颼-ꮧ,O嫚m ࡭`KH葦:粘i]aSU쓙$쐂f+詛頖b", [{"^<9<箝&絡;%i﫡2攑紴\\켉h쓙-柂䚝ven\u20f7浯-Ꮏ\r^훁䓚헬\u000e?\\ㅡֺJ떷VOt": [{ "-௄卶k㘆혐஽y⎱㢬sS઄+^瞥h;ᾷj;抭\u0003밫f<\/5Ⱗ裏_朻%*[-撵䷮彈-芈": { "㩩p3篊G|宮hz䑊o곥j^Co0": [ 653239109285256503, {"궲?|\":N1ۿ氃NZ#깩:쇡o8킗ࡊ[\"됸Po핇1(6鰏$膓}⽐*)渽J'DN<썙긘毦끲Ys칖": { "2Pr?Xjㆠ?搮/?㓦柖馃5뚣Nᦼ|铢r衴㩖\"甝湗ܝ憍": "\"뾯i띇筝牻$珲/4ka $匝휴译zbAᩁꇸ瑅&뵲衯ꎀᆿ7@ꈋ'ᶨH@ᠴl+", "7뢽뚐v?4^ꊥ_⪛.>pởr渲<\/⢕疻c\"g䇘vU剺dஔ鮥꒚(dv祴X⼹\\a8y5坆": true, "o뼄B욞羁hr﷔폘뒚⿛U5pꪴfg!6\\\"爑쏍䢱W<ﶕ\\텣珇oI/BK뺡'谑♟[Ut븷亮g(\"t⡎有?ꬊ躺翁艩nl F⤿蠜": 1695826030502619742, "ۊ깖>ࡹ햹^ⵕ쌾BnN〳2C䌕tʬ]찠?ݾ2饺蹳ぶꌭ訍\"◹ᬁD鯎4e滨T輀ﵣ੃3\u20f3킙D瘮g\\擦+泙ၧ 鬹ﯨַ肋7놷郟lP冝{ߒhড়r5,꓋": null, "ΉN$y{}2\\N﹯ⱙK'8ɜͣwt,.钟廣䎘ꆚk媄_": null, "䎥eᾆᝦ읉,Jުn岪㥐s搖謽䚔5t㯏㰳㱊ZhD䃭f絕s鋡篟a`Q鬃┦鸳n_靂(E4迠_觅뷝_宪D(NL疶hL追V熑%]v肫=惂!㇫5⬒\u001f喺4랪옑": { "2a輍85먙R㮧㚪Sm}E2yꆣꫨrRym㐱膶ᔨ\\t綾A☰.焄뙗9<쫷챻䒵셴᭛䮜.<\/慌꽒9叻Ok䰊Z㥪幸k": [ null, true, {"쌞쐍": { "▟GL K2i뛱iQ\"̠.옛1X$}涺]靎懠ڦ늷?tf灟ݞゟ{": 1.227740268699265E-19, "꒶]퓚%ฬK❅": [{ "(ෛ@Ǯっ䧼䵤[aテൖvEnAdU렖뗈@볓yꈪ,mԴ|꟢캁(而첸죕CX4Y믅": "2⯩㳿ꢚ훀~迯?᪑\\啚;4X\u20c2襏B箹)俣eỻw䇄", "75༂f詳䅫ꐧ鏿 }3\u20b5'∓䝱虀f菼Iq鈆﨤g퍩)BFa왢d0뮪痮M鋡nw∵謊;ꝧf美箈ḋ*\u001c`퇚퐋䳫$!V#N㹲抗ⱉ珎(V嵟鬒_b㳅\u0019": null, 
"e_m@(i㜀3ꦗ䕯䭰Oc+-련0뭦⢹苿蟰ꂏSV䰭勢덥.ྈ爑Vd,ᕥ=퀍)vz뱊ꈊB_6듯\"?{㒲&㵞뵫疝돡믈%Qw限,?\r枮\"? N~癃ruࡗdn&": null, "㉹&'Pfs䑜공j<\/?|8oc᧨L7\\pXᭁ 9᪘": -2.423073789014103E18, "䝄瑄䢸穊f盈᥸,B뾧푗횵B1쟢f\u001f凄": "魖⚝2儉j꼂긾껢嗎0ࢇ纬xI4](੓`蕞;픬\fC\"斒\")2櫷I﹥迧", "ퟯ詔x悝령+T?Bg⥄섅kOeQ큼㻴*{E靼6氿L缋\u001c둌๶-㥂2==-츫I즃㠐Lg踞ꙂEG貨鞠\"\u0014d'.缗gI-lIb䋱ᎂDy缦?": null, "紝M㦁犿w浴詟棓쵫G:䜁?V2ힽ7N*n&㖊Nd-'ຊ?-樹DIv⊜)g䑜9뉂ㄹ푍阉~ꅐ쵃#R^\u000bB䌎䦾]p.䀳": [{"ϒ爛\"ꄱ︗竒G䃓-ま帳あ.j)qgu扐徣ਁZ鼗A9A鸦甈!k蔁喙:3T%&㠘+,䷞|챽v䚞문H<\/醯r셓㶾\\a볜卺zE䝷_죤ဵ뿰᎟CB": [ 6233512720017661219, null, -1638543730522713294, false, -8901187771615024724, [ 3891351109509829590, true, false, -1.03836679125188032E18, { "j랎:g曞ѕᘼ}链N", -1.1103819473845426E-19, true, [ true, null, -7.9091791735309888E17, true, {"}蔰鋈+ꐨ啵0?g*사%`J?*": [{ "\"2wG?yn,癷BK\\龞䑞x?蠢": -3.7220345009853505E-19, ";饹়❀)皋`噿焒j(3⿏w>偍5X薙婏聿3aFÆÝ": "2,ꓴg?_섦_>Y쪥션钺;=趘F~?D㨫\bX?㹤+>/믟kᠪ멅쬂Uzỵ]$珧`m雁瑊ඖ鯬cꙉ梢f묛bB", "♽n$YjKiXX*GO贩鏃豮祴遞K醞眡}ꗨv嵎꼷0୸+M菋eH徸J꣆:⼐悥B켽迚㯃b諂\u000bjꠜ碱逮m8": [ "푷᣺ﻯd8ﱖ嬇ភH鹎⡱᱅0g:果6$GQ췎{vᷧYy-脕x偹砡館⮸C蓼ꏚ=軄H犠G谖ES詤Z蠂3l봟hᅭ7䦹1GPQG癸숟~[#駥8zQ뛣J소obg,", null, 1513751096373485652, null, -6.851466660824754E-19, {"䩂-⴮2ٰK솖풄꾚ႻP앳1H鷛wmR䗂皎칄?醜<\/&ࠧ㬍X濬䵈K`vJ륒Q/IC묛!;$vϑ": { "@-ꚗxྐྵ@m瘬\u0010U絨ﮌ驐\\켑寛넆T=tQ㭤L연@脸삯e-:⩼u㎳VQ㋱襗ຓ<Ⅶ䌸cML3+\u001e_C)r\\9+Jn\\Pﺔ8蠱檾萅Pq鐳话T䄐I": -1.80683891195530061E18, "ᷭዻU~ཷsgSJ`᪅'%㖔n5픆桪砳峣3獮枾䌷⊰呀": { "Ş੉䓰邟自~X耤pl7间懑徛s첦5ਕXexh⬖鎥᐀nNr(J컗|ૃF\"Q겮葲놔엞^겄+㈆话〾희紐G'E?飕1f❼텬悚泬먐U睬훶Qs": false, "(\u20dag8큽튣>^Y{뤋.袊䂓;_g]S\u202a꽬L;^'#땏bႌ?C緡<䝲䲝断ꏏ6\u001asD7IK5Wxo8\u0006p弊⼂ꯍ扵\u0003`뵂픋%ꄰ⫙됶l囏尛+䗅E쟇\\": [ true, { "\n鱿aK㝡␒㼙2촹f;`쾏qIࡔG}㝷䐍瓰w늮*粅9뒪ㄊCj倡翑閳R渚MiUO~仨䜶RꙀA僈㉋⦋n{㖥0딿벑逦⥻0h薓쯴Ꝼ": [ 5188716534221998369, 2579413015347802508, 9.010794400256652E-21, -6.5327297761238093E17, 1.11635352494065523E18, -6656281618760253655, { "": ")?", "TWKLꑙ裑꺔UE俸塑炌Ũ᜕-o\"徚#": {"M/癟6!oI51ni퐚=댡>xꍨ\u0004 ?": { "皭": {"⢫䋖>u%w잼<䕏꘍P䋵$魋拝U䮎緧皇Y훂&|羋ꋕ잿cJ䨈跓齳5\u001a삱籷I꿾뤔S8㌷繖_Yឯ䲱B턼O歵F\\l醴o_欬6籏=D": [ false, true, {"Mt|ꏞD|F궣MQ뵕T,띺k+?㍵i": [ 7828094884540988137, false, { "!༦鯠,&aﳑ>[euJꏽ綷搐B.h": -7648546591767075632, "-n켧嘰{7挐毄Y,>❏螵煫乌pv醑Q嶚!|⌝責0왾덢ꏅ蛨S\\)竰'舓Q}A釡5#v": 3344849660672723988, 
"8閪麁V=鈢1녈幬6棉⪮둌\u207d᚛驉ꛃ'r䆉惏ै|bἧﺢᒙ<=穊强s혧eꮿ慩⌡ \\槳W븧J檀C,ᘉ의0俯퀉M;筷ࣴ瓿{늊埂鄧_4揸Nn阼Jੵ˥(社": true, "o뼀vw)4A뢵(a䵢)p姃뛸\u000fK#KiQp\u0005ꅍ芅쏅": null, "砥$ꥸ┇耽u斮Gc{z빔깎밇\\숰\u001e괷各㶇쵿_ᴄ+h穢p촀Ნ䃬z䝁酳ӂ31xꔄ1_砚W렘G#2葊P ": [ -3709692921720865059, null, [ 6669892810652602379, -135535375466621127, "뎴iO}Z? 馢녱稹ᄾ䐩rSt帤넆&7i騏멗畖9誧鄜'w{Ͻ^2窭외b㑎粖i矪ꦨ탪跣)KEㆹ\u0015V8[W?⽉>'kc$䨘ᮛ뉻٬M5", 1.10439588726055846E18, false, -4349729830749729097, null, [ false, "_蠢㠝^䟪/D녒㡋ỎC䒈판\u0006એq@O펢%;鹐쏌o戥~A[ꡉ濽ỳ&虃᩾荣唙藍茨Ig楡꒻M窓冉?", true, 2.17220752996421728E17, -5079714907315156164, -9.960375974658589E-20, "ᾎ戞༒", true, false, [[ "ⶉᖌX⧕홇)g엃⹪x뚐癟\u0002", -5185853871623955469, { "L㜤9ợㇶK鐰⋓V뽋˖!斫as|9"፬䆪?7胜&n薑~": -2.11545634977136992E17, "O8뀩D}캖q萂6༣㏗䈓煮吽ਆᎼDᣘ폛;": false, "YTᡅ^L㗎cbY$pᣞ縿#fh!ꘂb삵玊颟샞ဢ$䁗鼒몁~rkH^:닮먖츸륈⪺쒉砉?㙓扫㆕꣒`R䢱B酂?C뇞<5Iޚ讳騕S瞦z": null, "\\RB?`mG댵鉡幐物䵎有5*e骄T㌓ᛪ琾駒Ku\u001a[柆jUq8⋈5鿋츿myﻗ?雍ux঴?": 5828963951918205428, "n0晅:黯 xu씪^퓞cB㎊ᬍ⺘٤փ~B岚3㥕擄vᲂ~F?C䶖@$m~忔S왖㲚?챴⊟W#벌{'㰝I䝠縁s樘\\X뢻9핡I6菍ㄛ8쯶]wॽ0L\"q": null, "x增줖j⦦t䏢᎙㛿Yf鼘~꫓恄4惊\u209c": "oOhbᤃ᛽z&Bi犑\\3B㩬劇䄑oŁ쨅孥멁ຖacA㖫借㞝vg싰샂㐜#譞⢤@k]鋰嘘䜾L熶塥_<\/⍾屈ﮊ_mY菹t뙺}Ox=w鮮4S1ꐩמּ'巑", "㗓蟵ꂾe蠅匳(JP䗏෸\u0089耀왲": [{ "ᤃ㵥韎뤽\r?挥O쯡⇔㞚3伖\u0005P⋪\"D궣QLn(⚘罩䩢Ŏv䤘尗뼤됛O淽鋋闚r崩a{4箙{煷m6〈": { "l곺1L": { "T'ਤ?砅|੬Km]䄩\"(࿶<\/6U爢䫈倔郴l2㴱^줣k'L浖L鰄Rp今鎗⒗C얨M훁㡧ΘX粜뫈N꤇輊㌻켑#㮮샶-䍗룲蠝癜㱐V>=\\I尬癤t=": 7648082845323511446, "鋞EP:<\/_`ၧe混ㇹBd⯢㮂驋\\q碽饩跓྿ᴜ+j箿렏㗑yK毢宸p謹h䦹乕U媣\\炤": [[ "3", [ true, 3.4058271399411134E-20, true, "揀+憱f逮@먻BpW曉\u001a㣐⎊$n劈D枤㡞좾\u001aᛁ苔౩闝1B䷒Ṋ݋➐ꀞꐃ磍$t੤_:蘺⮼(#N", 697483894874368636, [ "vᘯ锴)0訶}䳅⩚0O壱韈ߜ\u0018*U鍾䏖=䧉뽑单휻ID쿇嘗?ꌸῬ07", -5.4858784319382006E18, 7.5467775182251151E18, -8911128589670029195, -7531052386005780140, null, [ null, true, [[{ "1欯twG<\/Q:0怯押殃탷聫사<ỗꕧ蚨䡁nDꌕ\u001c녬~蓩鲃g儊>ꏡl㻿/⑷*챳6㻜W毤緛ﹺᨪ4\u0013뺚J髬e3쳸䘦伧?恪&{L掾p+꬜M䏊d娘6": { "2p첼양棜h䜢﮶aQ*c扦v︥뮓kC寵횂S銩&ǝ{O*य़iH`U큅ࡓr䩕5ꄸ?`\\᧫?ᮼ?t〟崾훈k薐ì/iy꤃뵰z1<\/AQ#뿩8jJ1z@u䕥": 1.82135747285215155E18, "ZdN &=d년ᅆ'쑏ⅉ:烋5&៏ᄂ汎来L㯄固{钧u\\㊏튚e摑&t嗄ꖄUb❌?m䴘熚9EW": [{ "ଛ{i*a(": -8.0314147546006822E17, "⫾ꃆY\u000e+W`௸ \"M뒶+\\뷐lKE}(NT킶Yj選篒쁶'jNQ硾(똡\\\"逌ⴍy? 
IRꜘ὞鄬﨧:M\\f⠋Cꚜ쫊ᚴNV^D䕗ㅖἔIao꿬C⍏8": [ 287156137829026547, { "H丞N逕⯲": {"": { "7-;枮阕梒9ᑄZ": [[[[ null, { "": [[[[ -7.365909561486078E-19, 2948694324944243408, null, [ true, "荒\"并孷䂡쵼9o䀘F\u0002龬7⮹Wz%厖/*? a*R枈㌦됾g뒠䤈q딄㺿$쮸tᶎ릑弣^鏎<\/Y鷇驜L鿽<\/춋9Mᲆឨ^<\/庲3'l낢", "c鮦\u001b두\\~?眾ಢu݆綑෪蘛轋◜gȃ<\/ⴃcpkDt誩܅\"Y", [[ null, null, [ 3113744396744005402, true, "v(y", { "AQ幆h쾜O+꺷铀ꛉ練A蚗⼺螔j㌍3꽂楎䥯뎸먩?": null, "蠗渗iz鱖w]擪E": 1.2927828494783804E-17, "튷|䀭n*曎b✿~杤U]Gz鄭kW|㴚#㟗ഠ8u擨": [[ true, null, null, {"⾪壯톽g7?㥜ώQꑐ㦀恃㧽伓\\*᧰閖樧뢇赸N휶䎈pI氇镊maᬠ탷#X?A+kНM ༑᩟؝?5꧎鰜ṚY즫궔 =ঈ;ﳈ?*s|켦蜌wM笙莔": [ null, -3808207793125626469, [ -469910450345251234, 7852761921290328872, -2.7979740127017492E18, 1.4458504352519893E-20, true, "㽙깹?먏䆢:䴎ۻg殠JBTU⇞}ꄹꗣi#I뵣鉍r혯~脀쏃#釯:场:䔁>䰮o'㼽HZ擓௧nd", [ 974441101787238751, null, -2.1647718292441327E-19, 1.03602824249831488E18, [ null, 1.0311977941822604E-17, false, true, { "": -3.7019778830816707E18, "E峾恆茍6xLIm縂0n2视֯J-ᤜz+ᨣ跐mYD豍繹⹺䊓몓ﴀE(@詮(!Y膽#᎙2䟓섣A䈀㟎,囪QbK插wcG湎ꤧtG엝x⥏俎j'A一ᯥ뛙6ㅑ鬀": 8999803005418087004, "よ殳\\zD⧅%Y泥簳Uꈩ*wRL{3#3FYHା[d岀䉯T稉駅䞘礄P:闈W怏ElB㤍喬赔bG䠼U଄Nw鰯闀楈ePsDꥷ꭬⊊": [ 6.77723657904486E-20, null, [ "ཚ_뷎꾑蹝q'㾱ꂓ钚蘞慵렜떆`ⴹ⎼櫯]J?[t9Ⓢ !컶躔I᮸uz>3a㠕i,錃L$氰텰@7녫W㸮?羧W뇧ꃞ,N鋮숪2ɼ콏┍䁲6", "&y?뢶=킕올Za惻HZk>c\u20b58i?ꦶcfBv잉ET9j䡡", "im珊Ճb칧校\\뼾쯀", 9.555715121193197E-20, true, { "<㫚v6腓㨭e1㕔&&V∌ᗈT奄5Lጥ>탤?튣瑦㳆ꉰ!(ᙪ㿬擇_n쌯IMΉ㕨␰櫈ᱷ5풔蟹&L.첽e鰷쯃劼﫭b#ﭶ퓀7뷄Wr㢈๧Tʴશ㶑澕鍍%": -1810142373373748101, "fg晌o?߲ꗄ;>C>?=鑰監侯Kt굅": true, "䫡蓺ꑷ]C蒹㦘\"1ః@呫\u0014NL䏾eg呮፳,r$裢k>/\\?ㄤᇰﻛ쉕1஥'Ċ\" \\_?쨔\"ʾr: 9S䘏禺ᪧꄂ㲄", [[{ "*硙^+E쌺I1䀖ju?:⦈Ꞓl๴竣迃xKC/饉:\fl\"XTFᄄ蟭,芢<\/骡軺띜hꏘ\u001f銿<棔햳▨(궆*=乥b8\\媦䷀뫝}닶ꇭ(Kej䤑M": [{ "1Ꮼ?>옿I╅C<ގ?ꊌ冉SV5A㢊㶆z-๎玶绢2F뵨@㉌뀌o嶔f9-庒茪珓뷳4": null, ";lᰳ": "CbB+肻a䄷苝*/볳+/4fq=㰁h6瘉샴4铢Y骐.⌖@哼猎㦞+'gꋸ㒕ߤ㞑(䶒跲ti⑴a硂#No볔", "t?/jE幸YHT셵⩎K!Eq糦ꗣv刴w\"l$ο:=6:移": { "z]鑪醊嫗J-Xm銌翁絨c里됏炙Ep㣋鏣똼嚌䀓GP﹖cmf4鹭T䅿꣭姧␸wy6ꦶ;S&(}ᎧKxᾂQ|t뻳k\"d6\"|Ml췆hwLt꼼4$&8Պ褵婶鯀9": {"嵃닢ᒯ'd᧫䳳#NXe3-붋鸿ଢ떓%dK\u0013䲎ꖍYV.裸R⍉rR3蟛\\:젯:南ĺLʆ넕>|텩鴷矔ꋅⒹ{t孶㓑4_": [ true, null, [ false, "l怨콈lᏒ", { "0w䲏嬧-:`䉅쉇漧\\܂yㄨb%㽄j7ᦶ涶<": 3.7899452730383747E-19, "ꯛTẀq纤q嶏V⿣?\"g}ი艹(쥯B T騠I=仵및X": {"KX6颠+&ᅃ^f畒y[": { 
"H?뱜^?꤂-⦲1a㋞&ꍃ精Ii᤾챪咽쬘唂쫷<땡劈훫놡o㥂\\ KⴙD秼F氮[{'좴:례晰Iq+I쭥_T綺砸GO煝䟪ᚪ`↹l羉q쐼D꽁ᜅ훦: vUV": true, "u^yﳍ0㱓#[y뜌앸ꊬL㷩?蕶蘾⻍KӼ": -7931695755102841701, "䤬轉車>\u001c鴵惋\"$쯃྆⇻n뽀G氠S坪]ಲꨍ捇Qxኻ椕駔\\9ࣼ﫻읜磡煮뺪ᶚ볝l㕆t+sζ": [[[ true, false, [ null, 3363739578828074923, true, { "\"鸣詩 볰㑵gL㯦῅춝旫}ED辗ﮈI쀤-ꧤ|㠦Z\"娑ᕸ4爏騍㣐\"]쳝Af]茛⬻싦o蚁k䢯䩐菽3廇喑ޅ": 4.5017999150704666E17, "TYႇ7ʠ值4챳唤~Zo&ݛ": false, "`塄J袛㭆끺㳀N㺣`꽐嶥KﯝSVᶔ∲퀠獾N딂X\"ᤏhNﬨvI": {"\u20bb㭘I䖵䰼?sw䂷쇪](泒f\"~;꼪Fԝsᝦ": {"p,'ꉂ軿=A蚶?bƉ㏵䅰諬'LYKL6B깯⋩겦뎙(ᜭ\u0006噣d꾆㗼Z;䄝䚔cd<情@䞂3苼㸲U{)<6&ꩻ钛\u001au〷N숨囖愙j=BXW욕^x芜堏Ῑ爂뛷꒻t✘Q\b": [[ "籛&ଃ䩹.ꃩ㦔\\C颫#暪&!勹ꇶ놽攺J堬镙~軌C'꾖䣹㮅岃ᙴ鵣", 4.317829988264744E15, 6.013585322002147E-20, false, true, null, null, -3.084633632357326E-20, false, null, { "\"짫愔昻 X\"藣j\"\"먁ཅѻ㘤㬯0晲DU꟒㸃d벀윒l䦾c੻*3": null, "谈Wm陧阦咟ฯ歖擓N喴㋐銭rCCnVࢥ^♼Ⅾ젲씗刊S༝+_t赔\\b䚍뉨ꬫ6펛cL䊘᜼<\/澤pF懽&H": [ null, { "W\"HDUuΌ퀟M'P4࿰H똆ⰱﮯ<\/凐蘲\"C鴫ﭒж}ꭩ쥾t5yd诪ﮡ퍉ⴰ@?氐醳rj4I6Qt": 6.9090159359219891E17, "絛ﳛ⺂": {"諰P㗮聦`ZQ?ꫦh*റcb⧱}埌茥h{棩렛툽o3钛5鮁l7Q榛6_g)ὄ\u0013kj뤬^爖eO4Ⱈ槞鉨ͺ订%qX0T썗嫷$?\\\"봅늆'%": [ -2.348150870600346E-19, [[ true, -6619392047819511778, false, [[ -1.2929189982356161E-20, 1.7417192219309838E-19, {"?嵲2࿐2\u0001啑㷳c縯": [ null, [ false, true, 2578060295690793218, { "?\"殃呎#㑑F": true, "}F炊_殛oU헢兔Ꝉ,赭9703.B数gTz3⏬": { "5&t3,햓Mݸᵣ㴵;꣫䩍↳#@뫷䠅+W-ࣇzᓃ鿕ಔ梭?T䮑ꥬ旴]u뫵막bB讍:왳둛lEh=숾鱠p咐$짏#?g⹷ᗊv㷵.斈u頻\u0018-G.": "뽙m-ouࣤ஫牷\"`Ksꕞ筼3HlȨvC堈\"I]㖡玎r먞#'W賜鴇k'c룼髋䆿飉㗆xg巤9;芔cጐ/ax䊨♢큓r吓㸫೼䢗da᩾\"]屣`", ":M딪<䢥喠\u0013㖅x9蕐㑂XO]f*Q呰瞊吭VP@9,㨣 D\\穎vˤƩs㜂-曱唅L걬/롬j㈹EB8g<\/섩o渀\"u0y&룣": ">氍緩L/䕑돯Ꟙ蕞^aB뒣+0jK⪄瑨痜LXK^힦1qK{淚t츔X:Vm{2r獁B뾄H첚7氥?쉟䨗ꠂv팳圎踁齀\\", "D彤5㢷Gꪻ[lㄆ@὜⓰絳[ଃ獽쮹☒[*0ꑚ㜳": 9022717159376231865, "ҖaV銣tW+$魿\u20c3亜~뫡ᙰ禿쨽㏡fṼzE/h": "5臐㋇Ჯ쮺? 
昨탰Wム밎#'\"崲钅U?幫뺀⍾@4kh>騧\\0ҾEV=爐͌U捀%ꉼ 㮋<{j]{R>:gԩL\u001c瀈锌ﯲﳡꚒ'⫿E4暍㌗뵉X\"H᝜", "ᱚגּ;s醒}犍SἿ㦣&{T$jkB\\\tḮ앾䤹o<避(tW": "vb⯽䴪䮢@|)", "⥒퐁껉%惀뗌+녣迺顀q條g⚯i⤭룐M琹j̈́⽜A": -8385214638503106917, "逨ꊶZ<\/W⫟솪㎮ᘇb?ꠔi\"H㧺x෷韒Xꫨฟ|]窽\u001a熑}Agn?Mᶖa9韲4$3Ỵ^=쏍煤ፐ돷2䣃%鷠/eQ9頸쥎", 2398360204813891033, false, 3.2658897259932633E-19, null, "?ꚃ8Nn㞷幵d䲳䱲뀙ꪛQ瑓鎴]䩋-鰾捡䳡??掊", false, -1309779089385483661, "ᦲxu_/yecR.6芏.ᜇ過 ~", -5658779764160586501, "쒌:曠=l썜䢜wk#s蕚\"互㮉m䉤~0듐䋙#G;h숄옥顇෤勹(C7㢅雚㐯L⠅VV簅<", null, -4.664877097240962E18, -4.1931322262828017E18, { ",": { "v㮟麑䄠뤵g{M띮.\u001bzt뢜뵡0Ǥ龍떟Ᾰ怷ϓRT@Lꀌ樂U㏠⾕e扉|bJg(뵒㠶唺~ꂿ(땉x⻫싉쁊;%0鎻V(o\f,N鏊%nk郼螺": -1.73631993428376141E18, "쟧摑繮Q@Rᕾ㭚㾣4隅待㓎3蒟": [ 4971487283312058201, 8973067552274458613, { "`a揙ᣗ\u0015iBo¸": 4.3236479112537999E18, "HW&퉡ぁ圍Y?瑡Qy훍q!帰敏s舠㫸zꚗaS歲v`G株巷Jp6킼 (귶鍔⾏⡈>M汐㞍ቴ꙲dv@i㳓ᇆ?黍": [ null, 4997607199327183467, "E㻎蠫ᐾ高䙟蘬洼旾﫠텛㇛?'M$㣒蔸=A_亀绉앭rN帮", null, [{ "Eᑞ)8餧A5u&㗾q?": [ -1.969987519306507E-19, null, [ 3.42437673373841E-20, true, "e걷M墁\"割P␛퍧厀R䱜3ﻴO퓫r﹉⹊", [ -8164221302779285367, [ true, null, "爘y^-?蘞Ⲽꪓa␅ꍨ}I", 1.4645984996724427E-19, [{ "tY좗⧑mrzﺝ㿥ⴖ᥷j諅\u0000q賋譁Ꞅ⮱S\nࡣB/큃굪3Zɑ复o<\/;롋": null, "彟h浠_|V4䦭Dᙣ♞u쿻=삮㍦\u001e哀鬌": [{"6횣楠,qʎꗇ鎆빙]㱭R굋鈌%栲j分僅ペ䇰w폦p蛃N溈ꡐꏀ?@(GI뉬$ﮄ9誁ꓚ2e甸ڋ[䁺,\u0011\u001cࢃ=\\+衪䷨ᯕ鬸K": [[ "ㅩ拏鈩勥\u000etgWVXs陂規p狵w퓼{뮵_i\u0002ퟑႢ⬐d6鋫F~챿搟\u0096䚼1ۼ칥0꣯儏=鋷牋ⅈꍞ龐", -7283717290969427831, true, [ 4911644391234541055, { "I鈒첽P릜朸W徨觘-Hᎄ퐟⓺>8kr1{겵䍃〛ᬡ̨O귑o䝕'쿡鉕p5": "fv粖RN瞖蛐a?q꤄\u001d⸥}'ꣴ犿ꦼ?뤋?鵆쥴덋䡫s矷̄?ඣ/;괱絢oWfV<\/\u202cC,㖦0䑾%n賹g&T;|lj_欂N4w", "짨䠗;䌕u i+r๏0": [{"9䥁\\఩8\"馇z䇔<\/ႡY3e狚쐡\"ุ6ﰆZ遖c\"Ll:ꮾ疣<\/᭙O◌납୕湞9⡳Und㫜\u0018^4pj1;䧐儂䗷ୗ>@e톬": { "a⑂F鋻Q螰'<퇽Q贝瀧{ᘪ,cP&~䮃Z?gI彃": [ -1.69158726118025933E18, [ "궂z簽㔛㮨瘥⤜䛖Gℤ逆Y⪾j08Sn昞ꘔ캻禀鴚P謦b{ꓮmN靐Mᥙ5\"睏2냑I\u0011.L&=?6ᄠ뻷X鸌t刑\"#z)o꫚n쳟줋", null, 7517598198523963704, "ኑQp襟`uᩄr方]*F48ꔵn俺ሙ9뇒", null, null, 6645782462773449868, 1219168146640438184, null, { ")ယ넌竀Sd䰾zq⫣⏌ʥ\u0010ΐ' |磪&p牢蔑mV蘸૰짬꺵;K": [ -7.539062290108008E-20, [ true, false, null, true, 6574577753576444630, [[ 1.2760162530699766E-19, [ null, [ "顊\\憎zXB,", [{ 
"㇆{CVC9-MN㜋ઘR눽#{h@ퟨ!鼚׼XOvXS\u0017ᝣ=cS+梽៲綆16s덽휐y屬?ᇳG2ᴭ\u00054쫖y룇nKcW̭炦s/鰘ᬽ?J|퓀髣n勌\u0010홠P>j": false, "箴": [ false, "鍞j\"ꮾ*엇칬瘫xṬ⭽쩁䃳\"-⋵?ᦽ댎Ĝ": true, "Pg帯佃籛n㔠⭹࠳뷏≻࿟3㞱!-쒾!}쭪䃕!籿n涻J5ਲ਼yvy;Rኂ%ᔡጀ裃;M⣼)쵂쑈": 1.80447711803435366E18, "ꈑC⡂ᑆ㤉壂뎃Xub<\/쀆༈憓ق쨐ק\\": [ 7706977185172797197, {"": {"K╥踮砆NWࡆFy韣7ä밥{|紒︧䃀榫rᩛꦡTSy잺iH8}ퟴ,M?Ʂ勺ᴹ@T@~꾂=I㙕뾰_涀쑜嫴曣8IY?ҿo줫fऒ}\\S\"ᦨ뵼#nDX": { "♘k6?଱癫d68?㽚乳䬳-V顷\u0005蝕?\u0018䞊V{邾zじl]雏k臤~ൖH뒐iꢥ]g?.G碄懺䔛pR$䅒X觨l봜A刊8R梒',}u邩퉕?;91Ea䈈믁G⊶芔h袪&廣㺄j;㡏綽\u001bN頸쳘橆": -2272208444812560733, "拑Wﵚj鵼駳Oࣿ)#㾅顂N傓纝y僱栜'Bꐍ-!KF*ꭇK¦?䈴^:啤wG逭w᧯": "xᣱmYe1ۏ@霄F$ě꧘푫O䤕퀐Pq52憬ꀜ兴㑗ᡚ?L鷝ퟐ뭐zJꑙ}╆ᅨJB]\"袌㺲u8䯆f", "꿽၅㔂긱Ǧ?SI": -1669030251960539193, "쇝ɨ`!葎>瞺瘡驷錶❤ﻮ酜=": -6961311505642101651, "?f7♄꫄Jᡔ훮e읇퍾፣䭴KhखT;Qty}O\\|뫁IῒNe(5惁ꥶㆷY9ﮡ\\ oy⭖-䆩婁m#x봉>Y鈕E疣s驇↙ᙰm<": {"퉻:dꂁ&efᅫ쫢[\"돈늖꺙|Ô剐1͖-K:ʚ᭕/;쏖㷛]I痐职4gZ4⍜kเꛘZ⥺\\Bʫᇩ鄨魢弞&幟ᓮ2̊盜", -9006004849098116748, -3118404930403695681, { "_彃Y艘-\"Xx㤩㳷瑃?%2䐡鵛o귵옔夘v*탋职&㳈챗|O钧": [ false, "daꧺdᗹ羞쯧H㍤鄳頳<型孒ン냆㹀f4㹰\u000f|C*ሟ鰠(O<ꨭ峹ipຠ*y೧4VQ蔔hV淬{?ᵌEfrI_", "j;ꗣ밷邍副]ᗓ", -4299029053086432759, -5610837526958786727, [ null, [ -1.3958390678662759E-19, { "lh좈T_믝Y\"伨\u001cꔌG爔겕ꫳ晚踍⿻읐T䯎]~e#฽燇\"5hٔ嶰`泯r;ᗜ쮪Q):/t筑,榄&5懶뎫狝(": [{ "2ፁⓛ]r3C攟וּ9賵s⛔6'ஂ|\"ⵈ鶆䐹禝3\"痰ࢤ霏䵩옆䌀?栕r7O簂Isd?K᫜`^讶}z8?z얰T:X倫⨎ꑹ": -6731128077618251511, "|︦僰~m漿햭\\Y1'Vvخ굇ቍ챢c趖": [null] }], "虌魿閆5⛔煊뎰㞤ᗴꥰF䮥蘦䂪樳-K᝷-(^\u20dd_": 2.11318679791770592E17 } ] ] ]}, "묗E䀳㧯᳀逞GMc\b墹㓄끖Ơ&U??펌鑍 媋k))ᄊ": null, "묥7콽벼諌J_DɯﮪM殴䣏,煚ྼ`Y:씧<\/⩫%yf䦀!1Ჶk춎Q米W∠WC跉鬽*ᛱi㴕L꘻ꀏ쓪\"_g鿄'#t⽙?,Wg㥖|D鑆e⥏쪸僬h鯔咼ඡ;4TK聎졠嫞" } ] ] } ] ] ]}} } ]} }, "뿋뀾淣截䔲踀&XJ펖꙯^Xb訅ꫥgᬐ>棟S\"혧騾밫겁7-": "擹8C憎W\"쵮yR뢩浗絆䠣簿9䏈引Wcy䤶孖ꯥ;퐌]輩䍐3@{叝 뽸0ᡈ쵡Ⲇ\u001dL匁꧐2F~ݕ㪂@W^靽L襒ᦘ~沦zZ棸!꒲栬R" } ] ], "Z:덃൛5Iz찇䅄駠㭧蓡K1": "e8᧤좱U%?ⵇ䯿鿝\u0013縮R∱骒EO\u000fg?幤@֗퉙vU`", "䐃쪈埽້=Ij,쭗쓇చ": false }]}} ] } ]} } ] ] ], "咰긖VM]᝼6䓑쇎琺etDҌ?㞏ꩄ퇫밉gj8蠃\"⩐5䛹1ࣚ㵪": "ക蹊?⎲⧘⾚̀I#\"䈈⦞돷`wo窭戕෱휾䃼)앷嵃꾞稧,Ⴆ윧9S?೗EMk3Მ3+e{⹔Te驨7䵒?타Ulg悳o43" } ], "zQᤚ纂땺6#ٽ﹧v￿#ࠫ휊冟蹧텈ꃊʆ?&a䥯De潝|쿓pt瓞㭻啹^盚2Ꝋf醪,얏T窧\\Di䕎谄nn父ꋊE": -2914269627845628872, "䉩跐|㨻ᷢ㝉B{蓧瞸`I!℄욃힕#ೲᙾ竛ᔺCjk췒늕貭词\u0017署?W딚%(pꍁ⤼띳^=on뺲l䆼bzrﳨ[&j狸䠠=ᜑꦦ\u2061յnj=牲攑)M\\龏": false, 
"뎕y絬᫡⥮Ϙᯑ㌔/NF*˓.,QEzvK!Iwz?|쥾\"ꩻL꼗Bꔧ賴緜s뉣隤茛>ロ?(?^`>冺飒=噸泥⺭Ᲊ婓鎔븜z^坷裮êⓅ໗jM7ﶕ找\\O": 1.376745434746303E-19 }, "䐛r滖w㏤,|Nዜ": false } ]], "@꿙?薕尬 gd晆(띄5躕ﻫS蔺4)떒錸瓍?~": 1665108992286702624, "w믍nᏠ=`঺ᅥC>'從됐槷䤝眷螄㎻揰扰XᅧC贽uჍ낟jKD03T!lDV쀉Ӊy뢖,袛!终캨G?鉮Q)⑗1쾅庅O4ꁉH7?d\u0010蠈줘월ސ粯Q!낇껉6텝|{": null, "~˷jg쿤촖쉯y": -5.5527605669177098E18, "펅Wᶺzꐆと푭e?4j仪열[D<鈑皶婆䵽ehS?袪;HꍨM뗎ば[(嗏M3q퍟g4y╸鰧茀[Bi盤~﫝唎鋆彺⦊q?B4쉓癚O洙킋툈䶯_?ퟲ": null } ] ]] ]], "꟱Ԕ㍤7曁聯ಃ錐V䷰?v㪃૦~K\"$%请|ꇹn\"k䫛㏨鲨\u2023䄢\u0004[︊VJ?䶟ាꮈ䗱=깘U빩": -4863152493797013264 } ]}]} ] }}} ], "쏷쐲۹퉃~aE唙a챑,9㮹gLHd'䔏|킗㍞䎥&KZYT맵7䥺Nⱳ同莞鿧w\\༌疣n/+ꎥU\"封랾○ퟙAJᭌ?9䛝$?驔9讐짘魡T֯c藳`虉C읇쐦T" } ], "谶개gTR￐>ၵ͚dt晑䉇陏滺}9㉸P漄": -3350307268584339381 }] ] ] ]] ] ], "0y꟭馋X뱔瑇:䌚￐廿jg-懲鸭䷭垤㒬茭u賚찶ಽ+\\mT땱\u20821殑㐄J쩩䭛ꬿNS潔*d\\X,壠뒦e殟%LxG9:摸": 3737064585881894882, "풵O^-⧧ⅶvѪ8廸鉵㈉ר↝Q㿴뺟EႳvNM:磇>w/៻唎뷭୥!냹D䯙i뵱貁C#⼉NH6`柴ʗ#\\!2䂗Ⱨf?諳.P덈-返I꘶6?8ꐘ": -8934657287877777844, "溎-蘍寃i诖ര\"汵\"\ftl,?d⼡쾪⺋h匱[,෩I8MҧF{k瓿PA'橸ꩯ綷퉲翓": null } ] ], "ោ係؁<元": 1.7926963090826924E-18 }}] } ] ]]}] }] ] ] ] ], "ጩV<\"ڸsOᤘ": 2.0527167903723048E-19 }] ]} ] ]], "∳㙰3젴p᧗䱙?`yZA8Ez0,^ᙛ4_0븢\u001ft:~䎼s.bb룦明yNP8弆C偯;⪾짍'蕴뮛": -6976654157771105701, "큵ꦀ\\㇑:nv+뒤燻䀪ﴣ﷍9ᚈ኷K㚊誦撪䚛,ꮪxሲ쳊\u0005HSf?asg昱dqꬌVꙇ㼺'k*'㈈": -5.937042203633044E-20 } ] }], "?}\u20e0],s嶳菋@#2u쒴sQS䩗=ꥮ;烌,|ꘔ䘆": "ᅩ영N璠kZ먕眻?2ቲ芋眑D륟渂⸑ﴃIRE]啗`K'" }}, "쨀jmV賂ﰊ姐䂦玞㬙ᏪM᪟Վ씜~`uOn*ॠ8\u000ef6??\\@/?9見d筜ﳋB|S䝬葫㽁o": true }, "즛ꄤ酳艚␂㺘봿㎨iG৕ࡿ?1\"䘓您\u001fSኝ⺿溏zៀ뻤B\u0019?윐a䳵᭱䉺膷d:<\/": 3935553551038864272 } ] ]} ]] ]] ]} } ] } ]]}}, "᥺3h↛!ꋰy\"攜(ெl䪕oUkc1A㘞ᡲ촾ᣫ<\/䒌E㛝潨i{v?W౾H\\RჅpz蝬R脾;v:碽✘↯삞鷱o㸧瑠jcmK7㶧뾥찲n": true, "ⶸ?x䊺⬝-䰅≁!e쩆2ꎿ准G踌XXᩯ1߁}0?.헀Z馟;稄\baDꟹ{-寪⚈ꉷ鮸_L7ƽᾚ<\u001bጨA䧆송뇵⨔\\礍뗔d设룱㶉cq{HyぱR㥽吢ſtp": -7985372423148569301, "緫#콮IB6<\/=5Eh礹\t8럭@饹韠r㰛斣$甝LV췐a갵'请o0g:^": "䔨(.", "띳℡圤pン௄ĝ倧訜B쁟G䙔\"Sb⓮;$$▏S1J뢙SF|赡g*\"Vu䲌y": "䪈&틐),\\kT鬜1풥;뷴'Zေ䩹@J鞽NぼM?坥eWb6榀ƩZڮ淽⺞삳煳xჿ絯8eⶍ羷V}ჿ쎱䄫R뱃9Z>'\u20f1ⓕ䏜齮" } ] ]]] }} } ] ]}, "펮b.h粔폯2npX詫g錰鷇㇒<쐙S値bBi@?镬矉`剔}c2壧ଭfhY깨R()痩⺃a\\⍔?M&ﯟ<劜꺄멊ᄟA\"_=": null }, "~潹Rqn榢㆓aR鬨侅?䜑亡V_翅㭔(䓷w劸ၳDp䀅<\/ﰎ鶊m䵱팱긽ꆘ긓准D3掱;o:_ќ)껚콥8곤d矦8nP倥ꃸI": null, "뾎/Q㣩㫸벯➡㠦◕挮a鶧⋓偼\u00001뱓fm覞n?㛅\"": 2.8515592202045408E17 }], 
",": -5426918750465854828, "2櫫@0柡g䢻/gꆑ6演&D稒肩Y?艘/놘p{f투`飷ᒉ챻돎<늛䘍ﴡ줰쫄": false, "8(鸑嵀⵹ퟡ<9㣎Tߗ┘d슒ل蘯&㠦뮮eࠍk砝g 엻": false, "d-\u208b?0ﳮ嵙'(J`蔿d^踅⤔榥\\J⵲v7": 6.8002426206715341E17, "ཎ耰큓ꐕ㱷\u0013y=詽I\"盈xm{0쾽倻䉚ષso#鰑/8㸴짯%ꀄ떸b츟*\\鲷礬ZQ兩?np㋄椂榨kc᡹醅3": false, "싊j20": false }]] ]], "俛\u0017n緽Tu뫉蜍鼟烬.ꭠIⰓ\"Ἀ᜾uC쎆J@古%ꛍm뻨ᾀ画蛐휃T:錖㑸ዚ9죡$": true } ] ], "㍵⇘ꦖ辈s}㱮慀밒s`\"㞟j:`i픻Z섫^諎0Ok{켿歁෣胰a2﨤[탳뚬쎼嫭뉮m": 409440660915023105, "w墄#*ᢄ峠밮jLa`ㆪ꺊漓Lで끎!Agk'ꁛ뢃㯐岬D#㒦": false, "ଦPGI䕺L몥罭ꃑ궩﮶#⮈ᢓӢ䚬p7웼臧%~S菠␌힀6&t䳙y㪘냏\\*;鉏ᅧ鿵'嗕pa\"oL쇿꬈Cg": "㶽1灸D⟸䴅ᆤ뉎﷛渤csx 䝔цꬃ锚捬?ຽ+x~꘩uI࡞\u0007栲5呚ẓem?袝\")=㥴䨃pac!/揎Y", "ᷱo\\||뎂몷r篙|#X䦜I#딌媸픕叞RD斳X4t⯩夬=[뭲r=绥jh뷱츝⪘%]⚋܈㖴スH텹m(WO曝劉0~K3c柢Ր㏉着逳~": false, "煽_qb[첑\\륌wE❽ZtCNﭝ+餌ᕜOꛭ": "{ﳾ쉌&s惧ᭁⵆ3䢫;䨞팑꒪흘褀࢖Q䠿V5뭀䎂澻%받u5텸oA⮥U㎦;B䳌wz䕙$ឿ\\௅婺돵⪾퐆\\`Kyौꋟ._\u0006L챯l뇠Hi䧈偒5", "艊佁ࣃ롇䱠爬!*;⨣捎慓q靓|儑ᨋL+迥=6㒺딉6弄3辅J-㕎뛄듘SG㆛(\noAzQꝱ䰩X*ぢO퀌%펠낌mo틮a^<\/F&_눊ᾉ㨦ы4\"8H": 2974648459619059400, "鬙@뎣䫳ၮ끡?){y?5K;TA*k溱䫜J汃ꂯ싔썍\u001dA}룖(<\/^,": false, "몏@QꋦFꊩᒐ뎶lXl垨4^郣|ꮇ;䝴ᝓ}쵲z珖": null } ]]]], ":_=닧弗D䙋暨鏛. 㱻붘䂍J儒&ZK/녩䪜r囁⽯D喠죥7⹌䪥c\u001a\u2076￞妈朹oLk菮F౟覛쐧㮏7T;}蛙2{9\"崓bB<\/⡷룀;즮鿹)丒툃୤뷠5W⊢嶜(fb뭳갣": "E{响1WM" }}, "䘨tjJ驳豨?y輊M*᳑梵瞻઻ofQG瑮e": 2.222802939724948E-19, "䮴=❑➶T෋w䞜\"垦ꃼUt\u001dx;B$뵣䙶E↌艣ᡥ!᧟;䱀[䔯k쬃`੍8饙른熏'2_'袻tGf蒭J땟as꯳╖&啒zWࡇᒫYSᏬ\u0014ℑ첥鈤|cG~Pᓮ\">\"": "ႆl\f7V儊㦬nHꄬꨧC{쐢~C⮃⛓嶦vꄎ1w鰠嘩뿠魄&\"_qMⵖ釔녮ꝇ 㝚{糍J哋 cv?-jkﻯྌ鹑L舟r", "龧葆yB✱H盋夔ﶉ?n*0(": "ꧣኆ㢓氥qZZ酒ຜ)鮢樛)X䣆gTSґG텞k.J圬疝롫쯭z L:\\ྤ@w炋塜쿖ᾳy뢀䶃뱝N䥨㚔勇겁#p", "도畎Q娡\"@S/뼋:䵏!P衅촚fVHQs✜ᐫi㻑殡B䜇%믚k*U#濨낄~": "ꍟዕ쳸ꍈ敋&l妏\u0005憡멗瘌uPgᅪm<\/To쯬锩h뒓k" } ] }], "墥홞r绚<\/⸹ⰃB}<躅\\Y;๑@䔸>韫䜲뱀X뗩鿥쩗SI%ﴞ㳕䛇?<\/\u00018x\\&侂9鋙a[LR㋭W胕)⡿8㞙0JF,}?허d1cDMᐃ␛鄝ⱕ%X)!XQ": "ⳍꗳ=橇a;3t⦾꼑仈ူaᚯ⯋ꕃAs鴷N⍕_䎃ꙎAz\u0016䯷\\<࿫>8q{}キ?ᣰ}'0ᴕ펓B┦lF#趤厃T?㕊#撹圂䆲" }, "܋닐龫論c웑": false, "ㇿ/q\"6-co髨휝C큦#\u001b4~?3䐹E삇<<": 7.600917488140322E-20, "䁝E6?㣖ꃁ间t祗*鑠{ḣV(浾h逇큞=W?ૉ?nꇽ8ꅉຉj으쮺@Ꚅ㰤u]Oyr": "v≁᫸_*όAඤԆl)ۓᦇQ}폠z༏q滚", "ソ᥊/넺I": true }]] ] ] ] ]] }, "䭑Ik攑\u0002QV烄:芩.麑㟴㘨≕": true, "坄꿕C쇻풉~崍%碼\\8\"䬦꣙": null, "欌L圬䅘Y8c(♺2?ON}o椳s宥2䉀eJ%闹r冁O^K諭%凞⺉⡻,掜?$ꥉ?略焕찳㯊艼誜4?\"﯎<゛XፈINT:詓 +": -1.0750456770694562E-19, "獒àc뜭싼ﺳ뎤K`]p隨LtE": null, 
"甙8䵊神EIꩤ鐯ᢀ,ﵮU䝑u疒ử驺䚿≚ഋ梶秓F`覤譐#짾蔀묊4<媍쬦靪_Yzgcࡶ4k紥`kc[Lﮗ簐*I瀑[⾰L殽鑥_mGȠ<\/|囹灠g桰iri": true, "챓ꖙꟻ좝菇ou,嗠0\\jK핻뜠qwQ?ഩ㼕3Y彦b\u009bJ榶N棨f?됦鏖綃6鳵M[OE봨u햏.Ꮁ癜蟳뽲ꩌ뻾rM豈R嗀羫 uDꎚ%": null }, "V傜2<": 7175127699521359521 }], "铫aG切<\/\"ী⊆e<^g࢛)D顝nאַ饼\u008c猪繩嵿ﱚCꡬ㻊g엺A엦\u000f暿_f꿤볝㦕桦`蒦䎔j甬%岝rj 糏": "䚢偎눴Au<4箞7礦Iﱔ坠eȧ䪸u䵁p|逹$嗫쨘ꖾ﷐!胠z寓팢^㨔|u8Nሇe텔ꅦ抷]،鹎㳁#༔繁 ", "낂乕ꃻ볨ϱ-ꇋ㖍fs⿫)zꜦ/K?솞♞ꑌ宭hJ᤭瑥Fu": false, "쟰ぜ魛G\u0003u?`㾕ℾ㣭5螠烶這趩ꖢ:@咕ꐶx뒘느m䰨b痃렐0鳊喵熬딃$摉_~7*ⱦ녯1錾GKhJ惎秴6'H妈Tᧅ窹㺒疄矤铟wላ": null, "쯆q4!3錕㲏ⵆ㇛꘷Z瑩뭆\\◪NH\u001d\\㽰U~㯶<\"쑣낞3ᵤ'峉eꢬ;鬹o꣒木X*長PXᘱu\"䠹n惞": null, "ᅸ祊\"&ꥴCjࢼ﴿?䡉`U效5殼㮞V昽ꏪ#ﺸ\\&t6x꠹盥꣰a[\u001aꪍSpe鎿蠹": -1.1564713893659811E-19 } ]] ] ] ], "羵䥳H,6ⱎ겾|@t\"#햊1|稃 섭)띜=뻔ꡜ???櫎~*ῡ꫌/繣ﻠq": null } ]} ]}, "츤": false }}, "s": 3.7339341963399598E18 } ], "N,I?1+㢓|ࣱ嶃쩥V2\u0012(4EE虪朶$|w颇v步": "~읢~_,Mzr㐫YB溓E淚\"ⅹ䈔ᏺ抙 b,nt5V㐒J檶ꏨ⻔?", "Q껑ꡡ}$넎qH煔惍/ez^!ẳF댙䝌馻剁8": "梲;yt钰$i冄}AL%a j뜐奷걳뚾d꿽*ሬuDY3?뮟鼯뮟w㍪틱V", "o{Q/K O胟㍏zUdꀐm&⨺J舕⾏魸訟㌥[T籨櫉唐킝 aṭ뱫촙莛>碶覆⧬짙쭰ׯdAiH໥벤퐥_恸[ 0e:죃TC弼荎뵁DA:w唵ꣁ": null, "὏樎䵮軧|?౗aWH쩃1 ꅭsu": null } ] }, "勂\\&m鰈J釮=Ⲽ鳋+䂡郑": null, "殣b綊倶5㥗惢⳷萢ᑀ䬄镧M^ﱴ3⣢翣n櫻1㨵}ኯ뗙顖Z.Q➷ꮨ뗇\u0004": "ꔙ䁼>n^[GीA䨟AM琢ᒊS쨲w?d㶣젊嘶纝麓+愣a%気ྞSc됓ᔘ:8bM7Xd8㶑臌]Ꙥ0ꐭ쒙䫣挵C薽Dfⵃ떼᷸", "?紡.셪_෨j\u0013Ox┠$Xᶨ-ᅇo薹-}軫;y毝㪜K㣁?.EV쮱4둽⛻䤜'2盡\u001f60(|e쐰㼎ᦀ㒧-$l@ﻑ坳\u0003䭱响巗WFo5c㧆T턁Y맸♤(": -2.50917882560589088E17 }} ], "侸\\릩.᳠뎠狣살cs项䭩畳H1s瀉븇19?.w骴崖㤊h痠볭㞳㞳䁮Ql怠㦵": "@䟴-=7f", "鹟1x௢+d ;vi䭴FSDS\u0004hꎹ㚍?⒍⦏ў6u,扩@됷Su)Pag휛TᒗV痩!瞏釀ꖞ蘥&ೞ蘐ꭰꞇᝎ": "ah懱Ժ&\u20f7䵅♎඀䞧鿪굛ౕ湚粎蚵ᯋ幌YOE)५襦㊝Y*^\"R+ඈ咷蝶9ꥂ榨艦멎헦閝돶v좛咊E)K㓷ྭr", "搆q쮦4綱켙셁.f4<\/g<籽늷?#蚴픘:fF\u00051㹉뀭.ᰖ풎f֦Hv蔎㧤.!䭽=鞽]음H:?\"-4": 8.740133984938656E-20 }]} } ], "tVKn딩꘥⊾蹓᤹{\u0003lR꼽ᄲQFᅏ傅ﱋ猢⤊ᔁ,E㓒秤nTතv`♛I\u0000]꫔ṞD\"麵c踝杰X&濿또꣹깳౥葂鿎\\aꡨ?": 3900062609292104525 } ], "ਉ샒⊩Lu@S䧰^g": -1.1487677090371648E18, "⎢k⑊꬗yᏫ7^err糎Dt\u000bJ礯확ㆍ沑サꋽe赔㝢^J\u0004笲㿋idra剰-᪉C錇/Ĝ䂾ညS지?~콮gR敉⬹'䧭": 1901472137232418266, "灗k䶥:?촽贍쓉꓈㒸g獘[뵎\\胕?\u0014_榙p.j稶,$`糉妋0>Fᡰly㘽$?": "]ꙛO赎&#㠃돱剳\"<◆>0誉齐_|z|裵씪>ᐌ㼍\"Z[琕}O?G뚇諦cs⠜撺5cu痑U圲\u001c?鴴計l춥/╓哼䄗茏ꮅ뫈댽A돌롖뤫V窗讬sHd&\nOi;_u" } ], "Uﺗ\\Y\\梷䄬~\u0002": null, "k\"Y磓ᗔ휎@U冈<\/w컑)[": false, "曏J蝷⌻덦\u001f㙳s꥓⍟邫P늮쥄c∬ྡྷ舆렮칤Z趣5콡넛A쳨\\뀙骫(棻.*&輛LiIfi{@EA婳KᬰTXT": 
-4.3088230431977587E17 }]} ] ], "곃㲧<\/dఓꂟs其ࡧ&N葶=?c㠤Ჴ'횠숄臼#\u001a~": false } ] ]}] }] }} ], "2f`⽰E쵟>J笂裭!〛觬囀ۺ쟰#桊l鹛ⲋ|RA_Vx፭gE됓h﵀mfỐ|?juTU档[d⢼⺻p濚7E峿": 5613688852456817133 }, "濘끶g忮7㏵殬W팕Q曁 뫰)惃廊5%-蹚zYZ樭ﴷQ锘쯤崫gg": true, "絥ᇑ⦏쒓븣爚H.㗊߄o蘵貆ꂚ(쎔O᥉ﮓ]姨Wꁓ!RMA|o퉢THx轮7M껁U즨'i뾘舯o": "跥f꜃?" }} ], "鷰鹮K-9k;ﰰ?_ݦѷ-ꅣ䩨Zꥱ\"mꠟ屎/콑Y╘2&鸞脇㏢ꀇ࠺ⰼ拾喭틮L꽩bt俸墶 [l/웄\"꾦\u20d3iও-&+\u000fQ+໱뵞": -1.296494662286671E-19 }, "HX੹/⨇୕붷Uﮘ旧\\쾜͔3l鄈磣糂̖䟎Eᐳw橖b῀_딕hu葰窳闹вU颵|染H죶.fP䗮:j䫢\\b뎖i燕ꜚG⮠W-≚뉗l趕": "ଊ칭Oa᡺$IV㷧L\u0019脴셀붿餲햪$迳向쐯켂PqfT\" ?I屉鴼쿕@硙z^鏕㊵M}㚛T젣쓌-W⩐-g%⺵<뮱~빅╴瑿浂脬\u0005왦燲4Ⴭb|D堧 <\/oEQh", "䘶#㥘੐캔f巋ἡAJ䢚쭈ࣨ뫒*mᇊK,ࣺAꑱ\u000bR<\/A\"1a6鵌㯀bh곿w(\"$ꘁ*rಐ趣.d࿩k/抶면䒎9W⊃9": "漩b挋Sw藎\u0000", "畀e㨼mK꙼HglKb,\"'䤜": null }]}] ] ] }] ]} ] ]} ], "歙>駿ꣂ숰Q`J΋方樛(d鱾뼣(뫖턭\u20f9lচ9歌8o]8윶l얶?镖G摄탗6폋폵+g:䱫홊<멀뀿/س|ꭺs걐跶稚W々c㫣⎖": "㣮蔊깚Cꓔ舊|XRf遻㆚︆'쾉췝\\&言", "殭\"cށɨꝙ䞘:嬮e潽Y펪㳅/\"O@ࠗ겴]췖YǞ(t>R\"N?梳LD恭=n氯T豰2R諸#N}*灧4}㶊G䍣b얚": null, "襞<\/啧 B|싞W瓇)6簭鼡艆lN쩝`|펭佡\\間邝[z릶&쭟愱ꅅ\\T᰽1鯯偐栈4̸s윜R7⒝/똽?치X": "⏊躖Cﱰ2Qẫ脐&இ?%냝悊", ",鰧偵셣싹xᎹ힨᯳EṬH㹖9": -4604276727380542356 } } ]]]], "웺㚑xs}q䭵䪠馯8?LB犯zK'os䚛HZ\"L?셎s^㿧㴘Cv2": null }] ] ] ], "Kd2Kv+|z": 7367845130646124107, "ᦂⶨ?ᝢ 祂些ഷ牢㋇操\"腭䙾㖪\\(y4cE뽺ㆷ쫺ᔖ%zfۻ$ў1柦,㶢9r漢": -3.133230960444846E-20, "琘M焀q%㢟f鸯O⣏蓑맕鯊$O噷|)z褫^㢦⠮ꚯ꫞`毕1qꢚ{ĭ䎀বώT\"뱘3G൴?^^of": null } ], "a8V᯺?:ﺃ/8ꉿBq|9啓댚;*i2": null, "cpT瀇H珰Ừpೃi鎪Rr␣숬-鹸ҩ䠚z脚цGoN8入y%趌I┽2ឪЀiJNcN)槣/▟6S숆牟\"箑X僛G殱娇葱T%杻:J諹昰qV쨰": 8331037591040855245 }], "G5ᩜ䄗巢껳": true } }, "Ồ巢ゕ@_譙A`碫鄐㡥砄㠓(^K": "?܃B혢▦@犑ὺD~T⧁|醁;o=J牌9냚⢽㨘{4觍蚔9#$∺\u0016p囅\\3Xk阖⪚\"UzA穕롬✎➁㭒춺C㣌ဉ\"2瓑员ᅽꝶ뫍}꽚ꞇ鶂舟彺]ꍽJC蝧銉", "␆Ě膝\"b-퉐ACR言J謈53~V튥x䜢?ꃽɄY뮩ꚜ": "K/↾e萃}]Bs⾿q룅鷦-膋?m+死^魊镲6", "粡霦c枋AHퟁo礼Ke?qWcA趸㡔ꂏ?\u000e춂8iতᦜ婪\u0015㢼nﵿꍻ!ᐴ関\u001d5j㨻gfῩUK5Ju丝tかTI'?㓏t>⼟o a>i}ᰗ;뤕ܝ": false, "ꄮ匴껢ꂰ涽+䜨B蛹H䛓-k蕞fu7kL谖,'涃V~챳逋穞cT\"vQ쓕ObaCRQ㓡Ⲯ?轭⫦輢墳?vA餽=h䮇킵n폲퉅喙?\"'1疬V嬗Qd灗'Lự": "6v!s믁㭟㣯獃!磸餠ቂh0C뿯봗F鷭gꖶ~コkK<ᦈTt\\跓w㭣횋钘ᆹ듡䑚W䟾X'ꅔ4FL勉Vܴ邨y)2'〚쭉⽵-鞣E,Q.?块", "?(˧쩯@崟吋歄K": null }, "Gc럃녧>?2DYI鴿\\륨)澔0ᔬlx'觔7젘⤡縷螩%Sv׫묈/]↱&S h\u0006歋ᑛxi̘}ひY蔯_醨鯘煑橾8?䵎쨋z儬ꁏ*@츾:": null } } } ] ] ]} }, "HO츧G": 3.694949578823609E17, "QC\u0012(翻曇Tf㷟bGBJ옉53\\嚇ᛎD/\u001b夾၉4\"핀@祎)쫆yD\"i먎Vn㿿V1W᨝䶀": 
-6150931500380982286, "Z㓮P翸鍱鉼K䋞꘺튿⭁Y": -7704503411315138850, "]모开ꬖP븣c霤<[3aΠ\"黁䖖䰑뮋ꤦ秽∼㑷冹T+YUt\"싳F↭䖏&鋌": -2.7231911483181824E18, "tꎖ": -4.9517948741799555E-19, "䋘즊.⬅IꬃۣQ챢ꄑ黐|f?C⾺|兕읯sC鬸섾整腨솷V": "旆柩l쪦sᖸMy㦅울썉瘗㎜檵9ꍂ駓ૉᚿ/u3씅徐拉[Z䞸ࡗ1ꆱ&Q풘?ǂ8\u0011BCDY2볨;鸏": null, "幫 n煥s쁇펇 왊-$C\"衝:\u0014㣯舼.3뙗Yl⋇\"K迎멎[꽵s}9鉳UK8쐥\"掄㹖h㙈!얄સ?Ꜳ봺R伕UTD媚I䜘W鏨蔮": -4.150842714188901E-17, "ﺯ^㄄\b죵@fྉkf颡팋Ꞧ{/Pm0V둳⻿/落韒ꊔᚬ@5螺G\\咸a谆⊪ቧ慷绖?财(鷇u錝F=r၍橢ឳn:^iᴵtD볠覅N赴": null }] }] } ] ]} ]}, "謯?w厓奰T李헗聝ឍ貖o⪇弒L!캶$ᆅ": -4299324168507841322, "뺊奉_垐浸延몏孄Z舰2i$q붿좾껇d▵餏\"v暜Ҭ섁m￴g>": -1.60911932510533427E18 } ] } ] ]], "퉝꺔㠦楶Pꅱ": 7517896876489142899, "": false } ]}, "是u&I狻餼|谖j\"7c됮sסּ-踳鉷`䣷쉄_A艣鳞凃*m⯾☦椿q㎭N溔铉tlㆈ^": 1.93547720203604352E18, "kⲨ\\%vr#\u000bⒺY\\t<\/3﬌R訤='﹠8蝤Ꞵ렴曔r": false } ]}, "阨{c?C\u001d~K?鎌Ԭ8烫#뙣P초遗t㭱E­돒䆺}甗[R*1!\\~h㕅᰺@<9JꏏષI䳖栭6綘걹ᅩM\"▯是∔v鬽顭⋊譬": "운ﶁK敂(欖C취پ℄爦賾" } }} }], "鷨赼鸙+\\䭣t圙ڹx᜾ČN<\/踘\"S_맶a鷺漇T彚⎲i㈥LT-xA캔$\u001cUH=a0츺l릦": "溣㣂0濕=鉵氬駘>Pꌢpb솇쬤h힊줎獪㪬CrQ矠a&脍꼬爼M茴/΅\u0017弝轼y#Ꞡc6둴=?R崏뷠麖w?" }, "閕ᘜ]CT)䵞l9z'xZF{:ؐI/躅匽졁:䟇AGF૸\u001cퟗ9)駬慟ꡒꆒRS״툋A<>\u0010\"ꂔ炃7g덚E৏bꅰ輤]o㱏_뷕ܘ暂\"u": "芢+U^+㢩^鱆8*1鈶鮀\u0002뺰9⬳ꪮlL䃣괟,G8\u20a8DF㉪錖0ㄤ瓶8Nଷd?眡GLc陓\\_죌V쁰ल二?c띦捱 \u0019JC\u0011b⤉zẒT볕\"绣蘨뚋cꡉkI\u001e鳴", "ꃣI'{6u^㡃#཰Kq4逹y൒䧠䵮!㱙/n??{L풓ZET㙠퍿X2᩟綳跠葿㚙w཮x캽扳B唕S|尾}촕%N?o䪨": null, "ⰴFjෟ셈[\u0018辷px?椯\\1<ﲻ栘ᣁ봢憠뉴p": -5263694954586507640 } ] ]] ]} ]}] ] ], "?#癘82禩鋆ꊝty?&": -1.9419029518535086E-19 } ] ] ]} ] ] ], "훊榲.|῕戄&.㚏Zꛦ2\"䢥ሆ⤢fV_摕婔?≍Fji冀탆꜕i㏬_ẑKᅢ꫄蔻XWc|饡Siẘ^㲦?羡2ぴ1縁ᙅ?쐉Ou": false }]] ]}}}, "慂뗄卓蓔ᐓ匐嚖/颹蘯/翻ㆼL?뇊,텵<\\獷ごCボ": null }, "p溉ᑟi짣z:䒤棇r^٫%G9缑r砌롧.물农g?0׼ሩ4ƸO㣥㯄쩞ጩ": null, "껎繥YxK\"F젷쨹뤤1wq轫o?鱑뜀瘊?뎃h灑\\ꛣ}K峐^ኖ⤐林ꉓhy": null } ], "᱀n肓ㄛ\"堻2>m殮'1橌%Ꞵ군=Ӳ鯨9耛<\/n據0u彘8㬇៩f᏿诙]嚊": "䋯쪦S럶匏ㅛ#)O`ሀX_鐪渲⛀㨻宅闩➈ꢙஶDR⪍" }, "tA썓龇 ⋥bj왎录r땽✒롰;羋^\\?툳*┎?썀ma䵳넅U䳆૘〹䆀LQ0\b疀U~u$M}(鵸g⳾i抦뛹?䤈땚검.鹆?ꩡtⶥGĒ;!ቹHS峻B츪켏f5≺": 2366175040075384032, "전pJjleb]ួ": -7.5418493141528422E18, "n.鎖ጲ\n?,$䪘": true }, "欈Ar㉣螵᪚茩?O)": null }, "쫸M#x}D秱欐K=侫们丐.KꕾxẠ\u001e㿯䣛F܍캗qq8꟞ṢFD훎⵳簕꭛^鳜\u205c٫~⑟~冫ऊ2쫰<\/戲윱o<\"": true }, "㷝聥/T뱂\u0010锕|内䞇x侁≦㭖:M?iM᣿IJe煜dG࣯尃⚩gPt*辂.{磼럾䝪@a\\袛?}ᓺB珼": true } } ]]}]}}, 
"tn\"6ꫤ샾䄄;銞^%VBPwu묪`Y僑N.↺Ws?3C⤻9唩S䠮ᐴm;sᇷ냞඘B/;툥B?lB∤)G+O9m裢0kC햪䪤": -4.5941249382502277E18, "ᚔt'\\愫?鵀@\\びꂕP큠<<]煹G-b!S?\nꖽ鼫,ݛ&頺y踦?E揆릱H}햧캡b@手.p탻>췽㣬ꒅ`qe佭P>ᓂ&?u}毚ᜉ蟶頳졪ᎏzl2wO": -2.53561440423275936E17 }]} } ] ]], "潈촒⿂叡": 5495738871964062986 } ]] } ] ]} ]] ]] ]} ] ]}, "ႁq킍蓅R`謈蟐ᦏ儂槐僻ﹶ9婌櫞釈~\"%匹躾ɢ뤥>࢟瀴愅?殕节/냔O✬H鲽엢?ᮈੁ⋧d␽㫐zCe*": 2.15062231586689536E17, "㶵Ui曚珰鋪ᾼ臧P{䍏䷪쨑̟A뼿T渠誈䏚D1!잶<\/㡍7?)2l≣穷᛾稝{:;㡹nemיּ訊`G": null, "䀕\"飕辭p圁f#뫆䶷뛮;⛴ᩍ3灚덏ᰝ쎓⦷詵%᜖Մfs⇫(\u001e~P|ﭗCⲾផv湟W첋(텪બT<บSꏉ੗⋲X婵i ӵ⇮?L䬇|ꈏ?졸": 1.548341247351782E-19 } ] }, "t;:N\u0015q鐦Rt缆{ꮐC?஛㷱敪\\+鲊㉫㓪몗릙竏(氵kYS": "XᰂT?൮ô", "碕飦幑|+ 㚦鏶`镥ꁩ B<\/加륙": -4314053432419755959, "秌孳(p!G?V傫%8ሽ8w;5鲗㦙LI檸\u2098": "zG N볞䆭鎍흘\\ONK3횙<\/樚立圌Q튅k쩎Ff쁋aׂJK銆ઘ즐狩6༥✙䩜篥CzP(聻駇HHퟲ讃%,ά{렍p而刲vy䦅ክ^톺M楒鍢㹳]Mdg2>䤉洞", "踛M젧>忔芿㌜Zk": 2215369545966507819, "씐A`$槭頰퍻^U覒\bG毲aᣴU;8!팲f꜇E⸃_卵{嫏羃X쀳C7뗮m(嚼u N܁谟D劯9]#": true, "ﻩ!뵸-筚P᭛}ἰ履lPh?౮ⶹꆛ穉뎃g萑㑓溢CX뾇G㖬A錟]RKaꄘ]Yo+@䘁's섎襠$^홰}F": null }, "粘ꪒ4HXᕘ蹵.$區\r\u001d묁77pPc^y笲Q<\/ꖶ 訍䃍ᨕG?*": 1.73773035935040224E17 }, "婅拳?bkU;#D矠❴vVN쩆t㜷A풃갮娪a%鮏絪3dAv룒#tm쑬⌛qYwc4|L8KZ;xU⓭㳔밆拓EZ7襨eD|隰ऌ䧼u9Ԣ+]贴P荿": 2.9628516456987075E18 }]}}] ]} }} ]}] ], "|g翉F*湹̶\u0005⏐1脉̀eI쩓ᖂ㫱0碞l䴨ꑅ㵽7AtἈ턧yq䳥塑:z:遀ᄐX눔擉)`N3昛oQ셖y-ڨ⾶恢ꈵq^<\/": null, "菹\\랓G^璬x৴뭸ゆUS겧﮷Bꮤ ┉銜᯻0%N7}~f洋坄Xꔼ<\/4妟Vꄟ9:౟곡t킅冩䧉笭裟炂4봋ⱳ叺怊t+怯涗\"0㖈Hq": false, "졬믟'ﺇফ圪쓬멤m邸QLব䗁愍4jvs翙 ྍ꧀艳H-|": null, "컮襱⣱뗠 R毪/鹙꾀%헳8&": -5770986448525107020 } ], "B䔚bꐻ뙏姓展槰T-똌鷺tc灿᫽^㓟䏀o3o$꘭趙萬I顩)뇭Ἑ䓝\f@{ᣨ`x3蔛": null } ] ] }], "⦖扚vWꃱ꥙㾠壢輓{-⎳鹷贏璿䜑bG倛⋐磎c皇皩7a~ﳫU╣Q࠭ꎉS摅姽OW.홌ೞ.": null, "蚪eVlH献r}ᮏ믠ﰩꔄ@瑄ⲱ": null, "퀭$JWoꩢg역쁍䖔㑺h&ୢtXX愰㱇?㾫I_6 OaB瑈q裿": null, "꽦ﲼLyr纛Zdu珍B絟쬴糔?㕂짹䏵e": "ḱ\u2009cX9멀i䶛簆㳀k" } ]]]], "(_ꏮg່澮?ᩑyM<艷\u001aꪽ\\庼뙭Z맷㰩Vm\\lY筺]3㋲2㌩㄀Eਟ䝵⨄쐨ᔟgङHn鐖⤇놋瓇Q탚單oY\"♆臾jHᶈ征ቄ??uㇰA?#1侓": null }, "觓^~ሢ&iI띆g륎ḱ캀.ᓡꀮ胙鈉": 1.0664523593012836E-19, "y詭Gbᔶऽs댁U:杜⤎ϲ쁗⮼D醄诿q뙰I#즧v蔎xHᵿt᡽[**?崮耖p缫쿃L菝,봬ꤦC쯵#=X1瞻@OZc鱗CQTx": null } ] }}], "剘紁\u0004\\Xn⊠6,တױ;嵣崇}讃iႽ)d1\\䔓": null }, "脨z\"{X,1u찜<'k&@?1}Yn$\u0015Rd輲ーa쮂굄+B$l": true, "諳>*쭮괐䵟Ґ+<箁}빀䅱⡔檏臒hIH脟ꩪC핝ଗP좕\"0i<\/C褻D۞恗+^5?'ꂱ䚫^7}㡠cq6\\쨪ꔞꥢ?纖䫀氮蒫侲빦敶q{A煲G": -6880961710038544266 }}] }, "5s⨲JvಽῶꭂᄢI.a৊": null, 
"?1q꽏쿻ꛋDR%U娝>DgN乭G": -1.2105047302732358E-19 } ] ]}, "qZz`撋뙹둣j碇쁏\\ꆥ\u0018@藴疰Wz)O{F䶛l᷂绘訥$]뮍夻䢋䩇萿獰樧猵⣭j萶q)$꬚⵷0馢W:Ⱍ!Qoe": -1666634370862219540, "t": "=wp|~碎Q鬳Ӎ\\l-<\/^ﳊhn퐖}䍔t碵ḛ혷?靻䊗", "邙쇡㯇%#=,E4勃驆V繚q[Y댻XV㡸[逹ᰏ葢B@u=JS5?bLRn얮㍉⏅ﰳ?a6[&큟!藈": 1.2722786745736667E-19 }, "X블땨4{ph鵋ꉯ웸 5p簂䦭s_E徔濧d稝~No穔噕뽲)뉈c5M윅>⚋[岦䲟懷恁?鎐꓆ฬ爋獠䜔s{\u001bm鐚儸煛%bﯿXT>ꗘ@8G": 1157841540507770724, "媤娪Q杸\u0011SAyᡈ쿯": true, "灚^ಸ%걁<\/蛯?\"祴坓\\\\'흍": -3.4614808555942579E18, "釴U:O湛㴑䀣렑縓\ta)(j:숾却䗌gCiB뽬Oyuq輥厁/7)?今hY︺Q": null } ] ]]]}] ], "I笔趠Ph!<ཛྷ㸞诘X$畉F\u0005笷菟.Esr릙!W☆䲖뗷莾뒭U\"䀸犜Uo3Gꯌx4r蔇᡹㧪쨢準<䂀%ࡡꟼ瑍8炝Xs0䀝销?fi쥱ꆝલBB": -8571484181158525797, "L⦁o#J|\"⽩-㱢d㌛8d\\㶤傩儻E[Y熯)r噤὘勇 }": "e(濨쓌K䧚僒㘍蠤Vᛸ\"络QJL2,嬓왍伢㋒䴿考澰@(㏾`kX$끑эE斡,蜍&~y", "vj.|统圪ᵮPL?2oŶ`밧\"勃+0ue%⿥绬췈체$6:qa렐Q;~晘3㙘鹑": true, "ශؙ4獄⶿c︋i⚅:ん閝Ⳙ苆籦kw{䙞셕pC췃ꍬ␜꟯ꚓ酄b힝hwk꭭M鬋8B耳쑘WQ\\偙ac'唀x᪌\u2048*h짎#ፇ鮠뾏ឿ뀌": false, "⎀jꄒ牺3Ⓝ컴~?親ꕽぼܓ喏瘘!@<튋㐌꿱⩦{a?Yv%⪧笯Uܱ栅E搚i뚬:ꄃx7䙳ꦋ&䓹vq☶I䁘ᾘ涜\\썉뺌Lr%Bc㍜3?ꝭ砿裞]": null, "⭤뙓z(㡂%亳K䌽꫿AԾ岺㦦㼴輞낚Vꦴw냟鬓㹈뽈+o3譻K1잞": 2091209026076965894, "ㇲ\t⋇轑ꠤ룫X긒\"zoY읇희wj梐쐑l侸`e%s": -9.9240075473576563E17, "啸ꮑ㉰!ᚓ}銏": -4.0694813896301194E18, ">]囋੽EK뇜>_ꀣ緳碖{쐐裔[<ನ\"䇅\"5L?#xTwv#罐\u0005래t应\\N?빗;": "v쮽瞭p뭃" } ]], "斴槾?Z翁\"~慍弞ﻆ=꜡o5鐋dw\"?K蠡i샾ogDﲰ_C*⬟iㇷ4nય蟏[㟉U꽌娛苸 ঢ়操贻洞펻)쿗૊許X⨪VY츚Z䍾㶭~튃ᵦ<\/E臭tve猑x嚢": null, "锡⛩<\/칥ꈙᬙ蝀&Ꚑ籬■865?_>L詏쿨䈌浿弥爫̫lj&zx<\/C쉾?覯n?": null, "꾳鑤/꼩d=ᘈn挫ᑩ䰬ZC": "3錢爋6Ƹ䴗v⪿Wr益G韠[\u0010屗9쁡钁u?殢c䳀蓃樄욂NAq赟c튒瘁렶Aૡɚ捍" } ] ] ]} ] ] }]]]}} ]}], "Ej䗳U<\/Q=灒샎䞦,堰頠@褙g_\u0003ꤾfⶽ?퇋!łB〙ד3CC䌴鈌U:뭔咎(Qો臃䡬荋BO7㢝䟸\"Yb": 2.36010731779814E-20, "逸'0岔j\u000e눘먷翌C츊秦=ꭣ棭ှ;鳸=麱$XP⩉駚橄A\\좱⛌jqv䰞3Ь踌v㳆¹gT┌gvLB賖烡m?@E঳i": null }, "曺v찘ׁ?&绫O័": 9107241066550187880 } ] ], "(e屄\u0019昜훕琖b蓘ᬄ0/۲묇Z蘮ဏ⨏蛘胯뢃@㘉8ሪWᨮ⦬ᅳ䅴HI၇쨳z囕陻엣1赳o": true, ",b刈Z,ၠ晐T솝ŕB⩆ou'퐼≃绗雗d譊": null, "a唥KB\"ﳝ肕$u\n^⅄P䟼냉䞸⩪u윗瀱ꔨ#yşs꒬=1|ﲤ爢`t౐튼쳫_Az(Ṋ擬㦷좕耈6": 2099309172767331582, "?㴸U<\/䢔ꯡ阽扆㐤q鐋?f㔫wM嬙-;UV죫嚔픞G&\"Cᗍ䪏풊Q": "VM7疹+陕枡툩窲}翡䖶8欞čsT뮐}璤:jﺋ鎴}HfA൝⧻Zd#Qu茅J髒皣Y-︴[?-~쉜v딏璮㹚䅊﩯<-#\u000e걀h\u0004u抱﵊㼃U<㱷⊱IC進" }, "숌dee節鏽邺p넱蹓+e罕U": true } ], "b⧴룏??ᔠ3ぱ>%郿劃翐ꏬꠛW瞳᫏누躨狀ໄy੽\"ីuS=㨞馸k乆E": 
"トz݈^9R䬑<ﮛGRꨳ\u000fTT泠纷꽀MRᴱ纊:㠭볮?%N56%鈕1䗍䜁a䲗j陇=뿻偂衋࿘ᓸ?ᕵZ+<\/}H耢b䀁z^f$&㝒LkꢳI脚뙛u": 5.694374481577558E-20 }] } ]], "obj": {"key": "wrong value"}, "퓲꽪m{㶩/뇿#⼢&᭙硞㪔E嚉c樱㬇1a綑᝖DḾ䝩": null }, "key": "6.908319653520691E8", "z": { "6U閆崬밺뀫颒myj츥휘:$薈mY햚#rz飏+玭V㭢뾿愴YꖚX亥ᮉ푊\u0006垡㐭룝\"厓ᔧḅ^Sqpv媫\"⤽걒\"˽Ἆ?ꇆ䬔未tv{DV鯀Tἆl凸g\\㈭ĭ즿UH㽤": null, "b茤z\\.N": [[ "ZL:ᅣዎ*Y|猫劁櫕荾Oj为1糕쪥泏S룂w࡛Ᏺ⸥蚙)", { "\"䬰ỐwD捾V`邀⠕VD㺝sH6[칑.:醥葹*뻵倻aD\"": true, "e浱up蔽Cr෠JK軵xCʨ<뜡癙Y獩ケ齈X/螗唻?<蘡+뷄㩤쳖3偑犾&\\첊xz坍崦ݻ鍴\"嵥B3㰃詤豺嚼aqJ⑆∥韼@\u000b㢊\u0015L臯.샥": false, "l?Ǩ喳e6㔡$M꼄I,(3᝝縢,䊀疅뉲B㴔傳䂴\u0088㮰钘ꜵ!ᅛ韽>": -5514085325291784739, "o㮚?\"춛㵉<\/﬊ࠃ䃪䝣wp6ἀ䱄[s*S嬈貒pᛥ㰉'돀": [{ "(QP윤懊FI<ꃣ『䕷[\"珒嶮?%Ḭ壍಻䇟0荤!藲끹bd浶tl\u2049#쯀@僞": {"i妾8홫": { ",M맃䞛K5nAㆴVN㒊햬$n꩑&ꎝ椞阫?/ṏ세뉪1x쥼㻤㪙`\"$쟒薟B煌܀쨝ଢ଼2掳7㙟鴙X婢\u0002": "Vዉ菈᧷⦌kﮞఈnz*﷜FM\"荭7ꍀ-VR<\/';䁙E9$䩉\f @s?퍪o3^衴cඎ䧪aK鼟q䆨c{䳠5mᒲՙ蘹ᮩ": { "F㲷JGo⯍P덵x뒳p䘧☔\"+ꨲ吿JfR㔹)4n紬G练Q፞!C|": true, "p^㫮솎oc.೚A㤠??r\u000f)⾽⌲們M2.䴘䩳:⫭胃\\፾@Fᭌ\\K": false, "蟌Tk愙潦伩": { "a<\/@ᾛ慂侇瘎": -7271305752851720826, "艓藬/>၄ṯ,XW~㲆w": {"E痧郶)㜓ha朗!N赻瞉駠uC\u20ad辠x퓮⣫P1ࠫLMMX'M刼唳됤": null, "P쓫晥%k覛ዩIUᇸ滨:噐혲lMR5䋈V梗>%幽u頖\\)쟟": null, "eg+昉~矠䧞难\b?gQ쭷筝\\eꮠNl{ಢ哭|]Mn銌╥zꖘzⱷ⭤ᮜ^": [ -1.30142114406914976E17, -1.7555215491128452E-19, null, "渾㨝ߏ牄귛r?돌?w[⚞ӻ~廩輫㼧/", -4.5737191805302129E18, null, "xy࿑M[oc셒竓Ⓔx?뜓y䊦>-D켍(&&?XKkc꩖ﺸᏋ뵞K伕6ী)딀P朁yW揙?훻魢傎EG碸9類៌g踲C⟌aEX舲:z꒸许", 3808159498143417627, null, {"m試\u20df1{G8&뚈h홯J<\/": { "3ஸ厠zs#1K7:rᥞoꅔꯧ&띇鵼鞫6跜#赿5l'8{7㕳(b/j\"厢aq籀ꏚ\u0015厼稥": [ -2226135764510113982, true, null, { "h%'맞S싅Hs&dl슾W0j鿏MםD놯L~S-㇡R쭬%": null, "⟓咔謡칲\u0000孺ꛭx旑檉㶆?": null, "恇I転;￸B2Y`z\\獓w,놏濐撐埵䂄)!䶢D=ഭ㴟jyY": { "$ࡘt厛毣ൢI芁<겿骫⫦6tr惺a": [ 6.385779736989334E-20, false, true, true, [ -6.891946211462334E-19, null, { "]-\\Ꟑ1/薓❧Ὂ\\l牑\u0007A郃)阜ᇒᓌ-塯`W峬G}SDb㬨Q臉⮻빌O鞟톴첂B㺱<ƈmu챑J㴹㷳픷Oㆩs": { "\"◉B\"pᶉt骔J꩸ᄇᛐi╰栛K쉷㉯鐩!㈐n칍䟅難>盥y铿e୔蒏M貹ヅ8嘋퀯䉶ጥ㏢殊뻳\"絧╿ꉑ䠥?∃蓊{}㣣Gk긔H1哵峱": false, "6.瀫cN䇮F㧺?\\椯=ڈT䘆4␘8qv": -3.5687501019676885E-19, "Q?yऴr혴{஀䳘p惭f1ﹸ䅷䕋贲<ྃᄊ繲hq\\b|#QSTs1c-7(䵢\u2069匏絘ꯉ:l毴汞t戀oෟᵶ뮱፣-醇Jx䙬䐁햢0࣫ᡁgrㄛ": "\u0011_xM/蘇Chv;dhA5.嗀绱V爤ﰦi뵲M", "⏑[\"ugoy^儣횎~U\\섯겜論l2jw஌yD腅̂\u0019": true, 
"ⵯɇ䐲᫿࢚!㯢l샅笶戮1꣖0Xe": null, "劅f넀識b宁焊E찓橵G!ʱ獓뭔雩괛": [{"p⹣켙[q>燣䍃㞽ᩲx:쓤삘7玑퇼0<\/q璂ᑁ[Z\\3䅵䧳\u0011㤧|妱緒C['췓Yꞟ3Z鳱雼P錻BU씧U`ᢶg蓱>.1ӧ譫'L_5V䏵Ц": [ false, false, {"22䂍盥N霂얢躰e9⑩_뵜斌n@B}$괻Yᐱ@䧋V\"☒-諯cV돯ʠ": true, "Ű螧ᔼ檍鍎땒딜qꄃH뜣<獧ूCY吓⸏>XQ㵡趌o끬k픀빯a(ܵ甏끆୯/6Nᪧ}搚ᆚ짌P牰泱鈷^d꣟#L삀\"㕹襻;k㸊\\f+": true, "쎣\",|⫝̸阊x庿k잣v庅$鈏괎炔k쬪O_": [ "잩AzZGz3v愠ꉈⵎ?㊱}S尳௏p\r2>췝IP䘈M)w|\u000eE", -9222726055990423201, null, [ false, {"´킮'뮤쯽Wx讐V,6ᩪ1紲aႈ\u205czD": [ -930994432421097536, 3157232031581030121, "l貚PY䃛5@䭄귻m㎮琸f": 1.0318894506812084E-19, "࢜⩢Ш䧔1肽씮+༎ᣰ闺馺窃䕨8Mƶq腽xc(៯夐J5굄䕁Qj_훨/~価.䢵慯틠퇱豠㼇Qﵘ$DuSp(8Uญ<\/ಟ룴𥳐ݩ$": 8350772684161555590, "ㆎQ䄾\u001bpᩭ${[諟^^骴᤮b^ㅥI┧T㉇⾞\"绦r䰂f矩'-7䡭桥Dz兔V9谶居㺍ᔊ䩯덲.\u001eL0ὅㅷ釣": [{ "<쯬J卷^숞u࠯䌗艞R9닪g㐾볎a䂈歖意:%鐔|ﵤ|y}>;2,覂⶚啵tb*仛8乒㓶B࿠㯉戩oX 貘5V嗆렽낁߼4h䧛ꍺM空\\b꿋貼": 8478577078537189402, "VD*|吝z~h譺aᯒ": { "YI췢K<\/濳xNne玗rJo쾘3핰鴊\"↱AR:ࢷ\"9?\"臁說)?誚ꊏe)_D翾W?&F6J@뺾ꍰNZ醊Z쾈വH嶿?炫㷱鬰M겈᭨b,⻁鈵P䕡䀠८ⱄ홎鄣": { "@?k2鶖㋮\"Oರ K㨇廪儲\u0017䍾J?);\b*묀㗠섳햭1MC V": null, "UIICP!BUA`ᢈ㋸~袩㗪⾒=fB﮴l1ꡛ죘R辂여ҳ7쮡<䩲`熕8頁": 4481809488267626463, "Y?+8먙ᚔ鋳蜩럶1㥔y璜౩`": [ null, 1.2850335807501874E-19, "~V2", 2035406654801997866, { "<숻1>\"": -8062468865199390827, "M㿣E]}qwG莎Gn᝶(ꔙ\\D⬲iꇲs寢t駇S뀡ꢜ": false, "pꝤ㎏9W%>M;-U璏f(^j1?&RB隧 忓b똊E": "#G?C8.躬ꥯ'?냪#< 渟&헿란zpo왓Kj}鷧XﻘMツb䕖;㪻", "vE풤幉xz뱕쫥Ug㦲aH} ᣟp:鬼YᰟH3镔ᴚ斦\\鏑r*2橱G⼔F/.j": true, "RK좬뎂a홠f*f㱉ᮍ⦋潙㨋Gu곌SGI3I뿐\\F',)t`荁蘯囯ﮉ裲뇟쥼_ገ驪▵撏ᕤV": 1.52738225997956557E18, "^k굲䪿꠹B逤%F㱢漥O披M㽯镞竇霒i꼂焅륓\u00059=皫之눃\u2047娤閍銤唫ၕb<\/w踲䔼u솆맚,䝒ᝳ'/it": "B餹饴is権ꖪ怯ꦂẉဎt\"!凢谵⧿0\\<=(uL䷍刨쑪>俆揓Cy襸Q힆䆭涷<\/ᐱ0ɧ䗾䚹\\ኜ?ꄢᇘ`䴢{囇}᠈䴥X4퓪檄]ꥷ/3謒ሴn+g騍X", "GgG꽬[(嫓몍6\u0004궍宩㙻/>\u0011^辍dT腪hxǑ%ꊇk,8(W⧂結P鬜O": [{ "M㴾c>\\ᓲ\u0019V{>ꤩ혙넪㭪躂TS-痴໸闓⍵/徯O.M㏥ʷD囎⧔쁳휤T??鉬뇙=#ꢫ숣BX䭼<\/d똬졬g榿)eꨋﯪ좇첻\u001a\u0011\";~쓆BH4坋攊7힪", "iT:L闞椕윚*滛gI≀Wਟඊ'ꢆ縺뱹鮚Nꩁ᧬蕼21줧\\䋯``⍐\\㏱鳨": 1927052677739832894, "쮁缦腃g]礿Y㬙 fヺSɪ꾾N㞈": [ null, null, { "!t,灝Y 1䗉罵?c饃호䉂Cᐭ쒘z(즽sZG㬣sഖE4뢜㓕䏞丮Qp簍6EZឪ겛fx'ꩱQ0罣i{k锩*㤴㯞r迎jTⲤ渔m炅肳": [ -3.3325685522591933E18, [{"㓁5]A䢕1룥BC?Ꙍ`r룔Ⳛ䙡u伲+\u0001്o": [ null, 4975309147809803991, null, null, {"T팘8Dﯲ稟MM☻㧚䥧/8ﻥ⥯aXLaH\"顾S☟耲ît7fS෉놁뮔/ꕼ䓈쁺4\\霶䠴ᩢ<\/t4?죵>uD5➶༆쉌럮⢀秙䘥\u20972ETR3濡恆vB? 
~鸆\u0005": { "`閖m璝㥉b뜴?Wf;?DV콜\u2020퍉౓擝宏ZMj3mJ먡-傷뱙yח㸷꥿ ໘u=M읝!5吭L4v\\?ǎ7C홫": null, "|": false, "~Ztᛋ䚘\\擭㗝傪W陖+㗶qᵿ蘥ᙄp%䫎)}=⠔6ᮢS湟-螾-mXH?cp": 448751162044282216, "\u209fad놹j檋䇌ᶾ梕㉝bוּ": {"?苴ꩠD䋓帘5騱qﱖPF?☸珗顒yU ᡫcb䫎 S@㥚gꮒ쎘泴멖\\:I鮱TZ듒ᶨQ3+f7캙\"?\f풾\\o杞紟﻽M.⏎靑OP": [ -2.6990368911551596E18, [{"䒖@<᰿<\/⽬tTr腞&G%᳊秩蜰擻f㎳?S㵧\r*k뎾-乢겹隷j軛겷0룁鮁": {")DO0腦:춍逿:1㥨่!蛍樋2": [{ ",ꌣf侴笾m๫ꆽ?1?U?\u0011ꌈꂇ": { "x捗甠nVq䅦w`CD⦂惺嘴0I#vỵ} \\귂S끴D얾?Ԓj溯\"v餄a": { "@翙c⢃趚痋i\u0015OQ⍝lq돆Y0pࢥ3쉨䜩^<8g懥0w)]䊑n洺o5쭝QL댊랖L镈Qnt⪟㒅십q헎鳒⮤眉ᔹ梠@O縠u泌ㄘb榚癸XޔFtj;iC": false, "I&뱋゘|蓔䔕측瓯%6ᗻHW\\N1貇#?僐ᗜgh᭪o'䗈꽹Rc욏/蔳迄༝!0邔䨷푪8疩)[쭶緄㇈୧ፐ": { "B+:ꉰ`s쾭)빼C羍A䫊pMgjdx䐝Hf9᥸W0!C樃'蘿f䫤סи\u0017Jve? 覝f둀⬣퓉Whk\"஼=չﳐ皆笁BIW虨쫓F廰饞": -642906201042308791, "sb,XcZ<\/m㉹ ;䑷@c䵀s奤⬷7`ꘖ蕘戚?Feb#輜}p4nH⬮eKL트}": [ "RK鳗z=袤Pf|[,u욺", "Ẏᏻ罯뉋⺖锅젯㷻{H䰞쬙-쩓D]~\u0013O㳢gb@揶蔉|kᦂ❗!\u001ebM褐sca쨜襒y⺉룓", null, null, true, -1.650777344339075E-19, false, "☑lꄆs힨꤇]'uTന⌳농].1⋔괁沰\"IWഩ\u0019氜8쟇䔻;3衲恋,窌z펏喁횗?4?C넁问?ᥙ橭{稻Ⴗ_썔", "n?]讇빽嗁}1孅9#ꭨ靶v\u0014喈)vw祔}룼쮿I", -2.7033457331882025E18, { ";⚃^㱋x:饬ኡj'꧵T☽O㔬RO婎?향ᒭ搩$渣y4i;(Q>꿘e8q": "j~錘}0g;L萺*;ᕭꄮ0l潛烢5H▄쳂ꏒוֹꙶT犘≫x閦웧v", "~揯\u2018c4職렁E~ᑅቚꈂ?nq뎤.:慹`F햘+%鉎O瀜쟏敛菮⍌浢<\/㮺紿P鳆ࠉ8I-o?#jﮨ7v3Dt赻J9": null, "ࣝW䌈0ꍎqC逖,횅c၃swj;jJS櫍5槗OaB>D踾Y": {"㒰䵝F%?59.㍈cᕨ흕틎ḏ㋩B=9IېⓌ{:9.yw}呰ㆮ肒᎒tI㾴62\"ዃ抡C﹬B<\/촋jo朣", [ -7675533242647793366, {"ᙧ呃:[㒺쳀쌡쏂H稈㢤\u001dᶗGG-{GHྻຊꡃ哸䵬;$?&d\\⥬こN圴됤挨-'ꕮ$PU%?冕눖i魁q騎Q": [ false, [[ 7929823049157504248, [[ true, "Z菙\u0017'eꕤ᱕l,0\\X\u001c[=雿8蠬L<\/낲긯W99g톉4ퟋb㝺\u0007劁'!麕Q궈oW:@X၎z蘻m絙璩귓죉+3柚怫tS捇蒣䝠-擶D[0=퉿8)q0ٟ", "唉\nFA椭穒巯\\䥴䅺鿤S#b迅獘 ﶗ꬘\\?q1qN犠pX꜅^䤊⛤㢌[⬛휖岺q唻ⳡ틍\"㙙Eh@oA賑㗠y必Nꊑᗘ", -2154220236962890773, -3.2442003245397908E18, "Wᄿ筠:瘫퀩?o貸q⊻(᎞KWf宛尨h^残3[U(='橄", -7857990034281549164, 1.44283696979059942E18, null, {"ꫯAw跭喀 ?_9\"Aty背F=9缉ྦྷ@;?^鞀w:uN㘢Rỏ": [ 7.393662029337442E15, 3564680942654233068, [ false, -5253931502642112194, "煉\\辎ೆ罍5⒭1䪁䃑s䎢:[e5}峳ﴱn騎3?腳Hyꏃ膼N潭錖,Yᝋ˜YAၓ㬠bG렣䰣:", true, null, { "⒛'P&%죮|:⫶춞": -3818336746965687085, "钖m<\/0ݎMtF2Pk=瓰୮洽겎.": [[ -8757574841556350607, -3045234949333270161, null, { "Ꮬr輳>⫇9hU##w@귪A\\C 
鋺㘓ꖐ梒뒬묹㹻+郸嬏윤'+g<\/碴,}ꙫ>손;情d齆J䬁ຩ撛챝탹/R澡7剌tꤼ?ặ!`⏲睤\u00002똥଴⟏": null, "\u20f2ܹe\\tAꥍư\\x当뿖렉禛;G檳ﯪS૰3~㘠#[J<}{奲 5箉⨔{놁<\/釿抋,嚠/曳m&WaOvT赋皺璑텁": [[ false, null, true, -5.7131445659795661E18, "萭m䓪D5|3婁ఞ>蠇晼6nﴺPp禽羱DS<睓닫屚삏姿", true, [ -8759747687917306831, { ">ⓛ\t,odKr{䘠?b퓸C嶈=DyEᙬ@ᴔ쨺芛髿UT퓻春<\/yꏸ>豚W釺N뜨^?꽴﨟5殺ᗃ翐%>퍂ဿ䄸沂Ea;A_\u0005閹殀W+窊?Ꭼd\u0013P汴G5썓揘": 4.342729067882445E-18, "Q^즾眆@AN\u0011Kb榰냎Y#䝀ꀒᳺ'q暇睵s\"!3#I⊆畼寤@HxJ9": false, "⿾D[)袨㇩i]웪䀤ᛰMvR<蟏㣨": {"v퇓L㪱ꖣ豛톤\\곱#kDTN": [{ "(쾴䡣,寴ph(C\"㳶w\"憳2s馆E!n!&柄<\/0Pꈗſ?㿳Qd鵔": {"娇堰孹L錮h嵅⛤躏顒?CglN束+쨣ﺜ\\MrH": {"獞䎇둃ቲ弭팭^ꄞ踦涟XK錆쳞ឌ`;੶S炥騞ଋ褂B៎{ڒ䭷ᶼ靜pI荗虶K$": [{"◖S~躘蒉꫿輜譝Q㽙闐@ᢗ¥E榁iء5┄^B[絮跉ᰥ遙PWi3wㄾⵀDJ9!w㞣ᄎ{듒ꓓb6\\篴??c⼰鶹⟧\\鮇ꮇ": [[ 654120831325413520, -1.9562073916357608E-19, { "DC(昐衵ἡ긙갵姭|֛[t": 7.6979110359897907E18, "J␅))嫼❳9Xfd飉j7猬ᩉ+⤻眗벎E鰉Zᄊ63zၝ69}ZᶐL崭ᦥ⡦靚⋛ꎨ~i㨃咊ꧭo䰠阀3C(": -3.5844809362512589E17, "p꣑팱쒬ꎑ뛡Ꙩ挴恍胔&7ᔈ묒4Hd硶훐㎖zꢼ豍㿢aሃ=<\/湉鵲EӅ%$F!퍶棌孼{O駍਺geu+": ")\u001b잓kŀX쩫A밁®ڣ癦狢)扔弒p}k縕ꩋ,䃉tࣼi", "ァF肿輸<솄G-䢹䛸ꊏl`Tqꕗ蒞a氷⸅ᴉ蠰]S/{J왲m5{9.uέ~㕚㣹u>x8U讁B덺襪盎QhVS맅킃i识{벂磄Iහ䙅xZy/抍૭Z鲁-霳V据挦ℒ": null, "㯛|Nꐸb7ⵐb?拠O\u0014ކ?-(EꞨ4ꕷᄤYᯕOW瞺~螸\"욿ќe㺰\"'㌢ƐW\u0004瞕>0?V鷵엳": true, "뤥G\\迋䠿[庩'꼡\u001aiᩮV쯁ᳪ䦪Ô;倱ନ뛁誈": null, "쥹䄆䚟Q榁䎐᢭<\/2㕣p}HW蟔|䃏꿈ꚉ锳2Pb7㙑Tⅹᵅ": { "Y?֭$>#cVBꩨ:>eL蒁務": { "86柡0po 䏚&-捑Ћ祌<\/휃-G*㶢הּ쩍s㶟餇c걺yu꽎還5*턧簕Og婥SꝐ": null, "a+葞h٥ࠆ裈嗫ﵢ5輙퀟ᛜ,QDﹼ⟶Y騠锪E_|x죗j侵;m蜫轘趥?븅w5+mi콛L": { ";⯭ﱢ!买F⽍柤鶂n䵣V㫚墱2렾ELEl⣆": [ true, -3.6479311868339015E-18, -7270785619461995400, 3.334081886177621E18, 2.581457786298155E18, -6.605252412954115E-20, -3.9232347037744167E-20, { "B6㊕.k1": null, "ZAꄮJ鮷ᳱo갘硥鈠䠒츼": { "ᕅ}럡}.@y陪鶁r業'援퀉x䉴ﵴl퍘):씭脴ᥞhiꃰblﲂ䡲엕8߇M㶭0燋標挝-?PCwe⾕J碻Ᾱ䬈䈥뷰憵賣뵓痬+": {"a췩v礗X⋈耓ፊf罅靮!㔽YYᣓw澍33⎔芲F|\"䜏T↮輦挑6ᓘL侘?ᅥ]덆1R௯✎餘6ꏽ<\/௨\\?q喷ꁫj~@ulq": {"嗫欆뾔Xꆹ4H㌋F嵧]ࠎ]㠖1ꞤT<$m뫏O i댳0䲝i": {"?෩?\u20cd슮|ꯆjs{?d7?eNs⢚嫥氂䡮쎱:鑵롟2hJꎒﯭ鱢3춲亄:뼣v䊭諱Yj択cVmR䩃㘬T\"N홝*ै%x^F\\_s9보zz4淗?q": [ null, "?", 2941869570821073737, "{5{殇0䝾g6밖퍋臩綹R$䖭j紋釰7sXI繳漪행y", false, "aH磂?뛡#惇d婅?Fe,쐘+늵䍘\"3r瘆唊勐j⳧࠴ꇓ<\/唕윈x⬌讣䋵%拗ᛆⰿ妴᝔M2㳗必꧂淲?ゥ젯檢<8끒MidX䏒3᳻Q▮佐UT|⤪봦靏⊏", [[{ "颉(&뜸귙{y^\"P퟉춝Ჟ䮭D顡9=?}Y誱<$b뱣RvO8cH煉@tk~4ǂ⤧⩝屋SS;J{vV#剤餓ᯅc?#a6D,s": [ -7.8781018564821536E16, 
true, [ -2.28770899315832371E18, false, -1.0863912140143876E-20, -6282721572097446995, 6767121921199223078, -2545487755405567831, false, null, -9065970397975641765, [ -5.928721243413937E-20, {"6촊\u001a홯kB0w撨燠룉{绎6⳹!턍贑y▾鱧ժ[;7ᨷ∀*땒䪮1x霆Hᩭ☔\"r䝐7毟ᝰr惃3ꉭE+>僒澐": [ "Ta쎩aƝt쵯ⰪVb", [ -5222472249213580702, null, -2851641861541559595, null, 4808804630502809099, 5657671602244269874, "5犲﨣4mᥣ?yf젫꾯|䋬잁$`Iⳉﴷ扳兝,'c", false, [ null, { "DyUIN쎾M仼惀⮥裎岶泭lh扠\u001e礼.tEC癯튻@_Qd4c5S熯A<\/\6U윲蹴Q=%푫汹\\\u20614b[௒C⒥Xe⊇囙b,服3ss땊뢍i~逇PA쇸1": -2.63273619193485312E17, "Mq꺋貘k휕=nK硍뫞輩>㾆~἞ࡹ긐榵l⋙Hw뮢帋M엳뢯v⅃^": 1877913476688465125, "ᶴ뻗`~筗免⚽টW˃⽝b犳䓺Iz篤p;乨A\u20ef쩏?疊m㝀컩뫡b탔鄃ᾈV(遢珳=뎲ିeF仢䆡谨8t0醄7㭧瘵⻰컆r厡궥d)a阄፷Ed&c﯄伮1p": null, "⯁w4曢\"(欷輡": "\"M᭫]䣒頳B\\燧ࠃN㡇j姈g⊸⺌忉ꡥF矉স%^", "㣡Oᄦ昵⫮Y祎S쐐級㭻撥>{I$": -378474210562741663, "䛒掷留Q%쓗1*1J*끓헩ᦢ﫫哉쩧EↅIcꅡ\\?ⴊl귛顮4": false, "寔愆샠5]䗄IH贈=d﯊/偶?ॊn%晥D視N򗘈'᫂⚦|X쵩넽z질tskxDQ莮Aoﱻ뛓": true, "钣xp?&\u001e侉/y䴼~?U篔蘚缣/I畚?Q绊": -3034854258736382234, "꺲໣眀)⿷J暘pИfAV삕쳭Nꯗ4々'唄ⶑ伻㷯騑倭D*Ok꧁3b␽_<\/챣Xm톰ၕ䆄`*fl㭀暮滠毡?": [ "D男p`V뙸擨忝븪9c麺`淂⢦Yw⡢+kzܖ\fY1䬡H歁)벾Z♤溊-혰셢?1<-\u0005;搢Tᐁle\\ᛵߓﭩ榩訝-xJ;巡8깊蠝ﻓU$K": { "Vꕡ諅搓W=斸s︪vﲜ츧$)iꡟ싉e寳?ጭムVથ嵬i楝Fg<\/Z|៪ꩆ-5'@ꃱ80!燱R쇤t糳]罛逇dṌ֣XHiͦ{": true, "Ya矲C멗Q9膲墅携휻c\\딶G甔<\/.齵휴": -1.1456247877031811E-19, "z#.OO￝J": -8263224695871959017, "崍_3夼ᮟ1F븍뽯ᦓ鴭V豈Ь": [{ "N蒬74": null, "yuB?厅vK笗!ᔸcXQ旦컶P-녫mᄉ麟_": "1R@ 톘xa_|﩯遘s槞d!d껀筤⬫薐焵먑D{\\6k共倌☀G~AS_D\"딟쬚뮥馲렓쓠攥WTMܭ8nX㩴䕅檹E\u0007ﭨN 2 ℆涐ꥏ꠵3▙玽|됨_\u2048", "恐A C䧩G": {":M큣5e들\\ꍀ恼ᔄ靸|I﨏$)n": { "|U䬫㟯SKV6ꛤ㗮\bn봻䲄fXT:㾯쳤'笓0b/ೢC쳖?2浓uO.䰴": "ཐ꼋e?``,ᚇ慐^8ꜙNM䂱\u0001IᖙꝧM'vKdꌊH牮r\\O@䊷ᓵ쀆(fy聻i툺\"?<\/峧ࣞ⓺ᤤ쵒߯ꎺ騬?)刦\u2072l慪y꺜ﲖTj+u", "뽫hh䈵w>1ⲏ쐭V[ⅎ\\헑벑F_㖝⠗㫇h恽;῝汰ᱼ瀖J옆9RR셏vsZ柺鶶툤r뢱橾/ꉇ囦FGm\"謗ꉦ⨶쒿⥡%]鵩#ᖣ_蹎 u5|祥?O", null, 2.0150326776036215E-19, null, true, false, true, {"\fa᭶P捤WWc᠟f뚉ᬏ퓗ⳀW睹5:HXH=q7x찙X$)모r뚥ᆟ!Jﳸf": [ -2995806398034583407, [ 6441377066589744683, "Mﶒ醹i)Gἦ廃s6몞 KJ౹礎VZ螺费힀\u0000冺업{谥'꡾뱻:.ꘘ굄奉攼Di᷑K鶲y繈욊阓v㻘}枭캗e矮1c?휐\"4\u0005厑莔뀾墓낝⽴洗ṹ䇃糞@b1\u0016즽Y轹", { "1⽕⌰鉟픏M㤭n⧴ỼD#%鐘⊯쿼稁븣몐紧ᅇ㓕ᛖcw嬀~ഌ㖓(0r⧦Q䑕髍ര铂㓻R儮\"@ꇱm❈௿᦯頌8}㿹犴?xn잆꥽R": 2.07321075750427366E18, 
"˳b18㗈䃟柵Z曆VTAu7+㛂cb0﯑Wp執<\/臋뭡뚋刼틮荋벲TLP预庰܈G\\O@VD'鱃#乖끺*鑪ꬳ?Mޞdﭹ{␇圯쇜㼞顄︖Y홡g": [{ "0a,FZ": true, "2z̬蝣ꧦ驸\u0006L↛Ḣ4๚뿀'?lcwᄧ㐮!蓚䃦-|7.飑挴.樵*+1ﮊ\u0010ꛌ%貨啺/JdM:똍!FBe?鰴㨗0O财I藻ʔWA᫓G쳛u`<\/I": [{ "$τ5V鴐a뾆両環iZp頻යn븃v": -4869131188151215571, "*즢[⦃b礞R◚nΰꕢH=귰燙[yc誘g䆌?ଜ臛": { "洤湌鲒)⟻\\䥳va}PeAMnN[": "㐳ɪ/(軆lZR,Cp殍ȮN啷\"3B婴?i=r$펽ᤐ쀸", "阄R4㒿㯔ڀ69ZᲦ2癁핌噗P崜#\\-쭍袛&鐑/$4童V꩑_ZHA澢fZ3": {"x;P{긳:G閉:9?活H": [ "繺漮6?z犞焃슳\">ỏ[Ⳛ䌜녏䂹>聵⼶煜Y桥[泥뚩MvK$4jtロ", "E#갶霠좭㦻ୗ먵F+䪀o蝒ba쮎4X㣵 h", -335836610224228782, null, null, [ "r1᫩0>danjY짿bs{", [ -9.594464059325631E-23, 1.0456894622831624E-20, null, 5.803973284253454E-20, -8141787905188892123, true, -4735305442504973382, 9.513150514479281E-20, "7넳$螔忷㶪}䪪l짴\u0007鹁P鰚HF銏ZJﳴ/⍎1ᷓ忉睇ᜋ쓈x뵠m䷐窥Ꮤ^\u0019ᶌ偭#ヂt☆၃pᎍ臶䟱5$䰵&๵分숝]䝈뉍♂坎\u0011<>", "C蒑貑藁lﰰ}X喇몛;t밿O7/᯹f\u0015kI嘦<ዴ㟮ᗎZ`GWퟩ瑹࡮ᅴB꿊칈??R校s脚", { "9珵戬+AU^洘拻ቒy柭床'粙XG鞕᠜繀伪%]hC,$輙?Ut乖Qm떚W8઼}~q⠪rU䤶CQ痗ig@#≲t샌f㈥酧l;y闥ZH斦e⸬]j⸗?ঢ拻퀆滌": null, "畯}㧢J罚帐VX㨑>1ꢶkT⿄蘥㝑o|<嗸層沈挄GEOM@-䞚䧰$만峬輏䠱V✩5宸-揂D'㗪yP掶7b⠟J㕻SfP?d}v㼂Ꮕ'猘": { "陓y잀v>╪": null, "鬿L+7:됑Y=焠U;킻䯌잫!韎ஔ\f": { "駫WmGጶ": { "\\~m6狩K": -2586304199791962143, "ႜࠀ%͑l⿅D.瑢Dk%0紪dḨTI픸%뗜☓s榗኉\"?V籄7w髄♲쟗翛歂E䤓皹t ?)ᄟ鬲鐜6C": { "_췤a圷1\u000eB-XOy缿請∎$`쳌eZ~杁튻/蜞`塣৙\"⪰\"沒l}蕌\\롃荫氌.望wZ|o!)Hn獝qg}": null, "kOSܧ䖨钨:಼鉝ꭝO醧S`십`ꓭ쭁ﯢN&Et㺪馻㍢ⅳ㢺崡ຊ蜚锫\\%ahx켨|ż劻ꎄ㢄쐟A躊᰹p譞綨Ir쿯\u0016ﵚOd럂*僨郀N*b㕷63z": { ":L5r+T㡲": [{ "VK泓돲ᮙRy㓤➙Ⱗ38oi}LJቨ7Ó㹡৘*q)1豢⛃e᫛뙪壥镇枝7G藯g㨛oI䄽 孂L缊ꋕ'EN`": -2148138481412096818, "`⛝ᘑ$(खꊲ⤖ᄁꤒ䦦3=)]Y㢌跨NĴ驳줟秠++d孳>8ᎊ떩EꡣSv룃 쯫أ?#E|᭙㎐?zv:5祉^⋑V": [ -1.4691944435285607E-19, 3.4128661569395795E17, "㐃촗^G9佭龶n募8R厞eEw⺡_ㆱ%⼨D뉄퉠2ꩵᛅⳍ搿L팹Lවn=\"慉념ᛮy>!`g!풲晴[/;?[v겁軇}⤳⤁핏∌T㽲R홓遉㓥", "愰_⮹T䓒妒閤둥?0aB@㈧g焻-#~跬x<\/舁P݄ꐡ=\\׳P\u0015jᳪᢁq;㯏l%᭗;砢觨▝,謁ꍰGy?躤O黩퍋Y㒝a擯\n7覌똟_䔡]fJ晋IAS", 4367930106786121250, -4.9421193149720582E17, null, { ";ᄌ똾柉곟ⰺKpፇ䱻ฺ䖝{o~h!eꁿ઻욄ښ\u0002y?xUd\u207c悜ꌭ": [ 1.6010824122815255E-19, [ "宨︩9앉檥pr쇷?WxLb", "氇9】J玚\u000f옛呲~ 輠1D嬛,*mW3?n휂糊γ虻*ᴫ꾠?q凐趗Ko↦GT铮", "㶢ថmO㍔k'诔栀Z蛟}GZ钹D", false, -6.366995517736813E-20, -4894479530745302899, null, "V%᫡II璅䅛䓎풹ﱢ/pU9se되뛞x梔~C)䨧䩻蜺(g㘚R?/Ự[忓C뾠ࢤc왈邠买?嫥挤풜隊枕", ",v碍喔㌲쟚蔚톬៓ꭶ", 3.9625444752577524E-19, null, [ 
"kO8란뿒䱕馔b臻⍟隨\"㜮鲣Yq5m퐔K#ꢘug㼈ᝦ=P^6탲@䧔%$CqSw铜랊0&m⟭<\/a逎ym\u0013vᯗ": true, "洫`|XN뤮\u0018詞=紩鴘_sX)㯅鿻Ố싹": 7.168252736947373E-20, "ꛊ饤ﴏ袁(逊+~⽫얢鈮艬O힉7D筗S곯w操I斞᠈븘蓷x": [[[[ -7.3136069426336952E18, -2.13572396712722688E18, { "硢3㇩R:o칢行E<=\u0018ၬYuH!\u00044U%卝炼2>\u001eSi$⓷ꒈ'렢gᙫ番ꯒ㛹럥嶀澈v;葷鄕x蓎\\惩+稘UEᖸﳊ㊈壋N嫿⏾挎,袯苷ኢ\\x|3c": 7540762493381776411, "?!*^ᢏ窯?\u0001ڔꙃw虜돳FgJ?&⨫*uo籤:?}ꃹ=ٴ惨瓜Z媊@ત戹㔏똩Ԛ耦Wt轁\\枒^\\ꩵ}}}ꀣD\\]6M_⌫)H豣:36섘㑜": { ";홗ᰰU஋㙛`D왔ཿЃS회爁\u001b-㢈`봆?盂㛣듿ᦾ蒽_AD~EEຆ㊋(eNwk=Rɠ峭q\"5Ἠ婾^>'ls\n8QAK)- Q䲌mo펹L_칍樖庫9꩝쪹ᘹ䑖瀍aK ?*趤f뭓廝p=磕", "哑z懅ᤏ-ꍹux쀭", [ true, 3998739591332339511, "ጻ㙙?᳸aK<\/囩U`B3袗ﱱ?\"/k鏔䍧2l@쿎VZ쨎/6ꃭ脥|B?31+on颼-ꮧ,O嫚m ࡭`KH葦:粘i]aSU쓙$쐂f+詛頖b", [{"^<9<箝&絡;%i﫡2攑紴\\켉h쓙-柂䚝ven\u20f7浯-Ꮏ\r^훁䓚헬\u000e?\\ㅡֺJ떷VOt": [{ "-௄卶k㘆혐஽y⎱㢬sS઄+^瞥h;ᾷj;抭\u0003밫f<\/5Ⱗ裏_朻%*[-撵䷮彈-芈": { "㩩p3篊G|宮hz䑊o곥j^Co0": [ 653239109285256503, {"궲?|\":N1ۿ氃NZ#깩:쇡o8킗ࡊ[\"됸Po핇1(6鰏$膓}⽐*)渽J'DN<썙긘毦끲Ys칖": { "2Pr?Xjㆠ?搮/?㓦柖馃5뚣Nᦼ|铢r衴㩖\"甝湗ܝ憍": "\"뾯i띇筝牻$珲/4ka $匝휴译zbAᩁꇸ瑅&뵲衯ꎀᆿ7@ꈋ'ᶨH@ᠴl+", "7뢽뚐v?4^ꊥ_⪛.>pởr渲<\/⢕疻c\"g䇘vU剺dஔ鮥꒚(dv祴X⼹\\a8y5坆": true, "o뼄B욞羁hr﷔폘뒚⿛U5pꪴfg!6\\\"爑쏍䢱W<ﶕ\\텣珇oI/BK뺡'谑♟[Ut븷亮g(\"t⡎有?ꬊ躺翁艩nl F⤿蠜": 1695826030502619742, "ۊ깖>ࡹ햹^ⵕ쌾BnN〳2C䌕tʬ]찠?ݾ2饺蹳ぶꌭ訍\"◹ᬁD鯎4e滨T輀ﵣ੃3\u20f3킙D瘮g\\擦+泙ၧ 鬹ﯨַ肋7놷郟lP冝{ߒhড়r5,꓋": null, "ΉN$y{}2\\N﹯ⱙK'8ɜͣwt,.钟廣䎘ꆚk媄_": null, "䎥eᾆᝦ읉,Jުn岪㥐s搖謽䚔5t㯏㰳㱊ZhD䃭f絕s鋡篟a`Q鬃┦鸳n_靂(E4迠_觅뷝_宪D(NL疶hL追V熑%]v肫=惂!㇫5⬒\u001f喺4랪옑": { "2a輍85먙R㮧㚪Sm}E2yꆣꫨrRym㐱膶ᔨ\\t綾A☰.焄뙗9<쫷챻䒵셴᭛䮜.<\/慌꽒9叻Ok䰊Z㥪幸k": [ null, true, {"쌞쐍": { "▟GL K2i뛱iQ\"̠.옛1X$}涺]靎懠ڦ늷?tf灟ݞゟ{": 1.227740268699265E-19, "꒶]퓚%ฬK❅": [{ "(ෛ@Ǯっ䧼䵤[aテൖvEnAdU렖뗈@볓yꈪ,mԴ|꟢캁(而첸죕CX4Y믅": "2⯩㳿ꢚ훀~迯?᪑\\啚;4X\u20c2襏B箹)俣eỻw䇄", "75༂f詳䅫ꐧ鏿 }3\u20b5'∓䝱虀f菼Iq鈆﨤g퍩)BFa왢d0뮪痮M鋡nw∵謊;ꝧf美箈ḋ*\u001c`퇚퐋䳫$!V#N㹲抗ⱉ珎(V嵟鬒_b㳅\u0019": null, "e_m@(i㜀3ꦗ䕯䭰Oc+-련0뭦⢹苿蟰ꂏSV䰭勢덥.ྈ爑Vd,ᕥ=퀍)vz뱊ꈊB_6듯\"?{㒲&㵞뵫疝돡믈%Qw限,?\r枮\"? 
N~癃ruࡗdn&": null, "㉹&'Pfs䑜공j<\/?|8oc᧨L7\\pXᭁ 9᪘": -2.423073789014103E18, "䝄瑄䢸穊f盈᥸,B뾧푗횵B1쟢f\u001f凄": "魖⚝2儉j꼂긾껢嗎0ࢇ纬xI4](੓`蕞;픬\fC\"斒\")2櫷I﹥迧", "ퟯ詔x悝령+T?Bg⥄섅kOeQ큼㻴*{E靼6氿L缋\u001c둌๶-㥂2==-츫I즃㠐Lg踞ꙂEG貨鞠\"\u0014d'.缗gI-lIb䋱ᎂDy缦?": null, "紝M㦁犿w浴詟棓쵫G:䜁?V2ힽ7N*n&㖊Nd-'ຊ?-樹DIv⊜)g䑜9뉂ㄹ푍阉~ꅐ쵃#R^\u000bB䌎䦾]p.䀳": [{"ϒ爛\"ꄱ︗竒G䃓-ま帳あ.j)qgu扐徣ਁZ鼗A9A鸦甈!k蔁喙:3T%&㠘+,䷞|챽v䚞문H<\/醯r셓㶾\\a볜卺zE䝷_죤ဵ뿰᎟CB": [ 6233512720017661219, null, -1638543730522713294, false, -8901187771615024724, [ 3891351109509829590, true, false, -1.03836679125188032E18, { "j랎:g曞ѕᘼ}链N", -1.1103819473845426E-19, true, [ true, null, -7.9091791735309888E17, true, {"}蔰鋈+ꐨ啵0?g*사%`J?*": [{ "\"2wG?yn,癷BK\\龞䑞x?蠢": -3.7220345009853505E-19, ";饹়❀)皋`噿焒j(3⿏w>偍5X薙婏聿3aFÆÝ": "2,ꓴg?_섦_>Y쪥션钺;=趘F~?D㨫\bX?㹤+>/믟kᠪ멅쬂Uzỵ]$珧`m雁瑊ඖ鯬cꙉ梢f묛bB", "♽n$YjKiXX*GO贩鏃豮祴遞K醞眡}ꗨv嵎꼷0୸+M菋eH徸J꣆:⼐悥B켽迚㯃b諂\u000bjꠜ碱逮m8": [ "푷᣺ﻯd8ﱖ嬇ភH鹎⡱᱅0g:果6$GQ췎{vᷧYy-脕x偹砡館⮸C蓼ꏚ=軄H犠G谖ES詤Z蠂3l봟hᅭ7䦹1GPQG癸숟~[#駥8zQ뛣J소obg,", null, 1513751096373485652, null, -6.851466660824754E-19, {"䩂-⴮2ٰK솖풄꾚ႻP앳1H鷛wmR䗂皎칄?醜<\/&ࠧ㬍X濬䵈K`vJ륒Q/IC묛!;$vϑ": { "@-ꚗxྐྵ@m瘬\u0010U絨ﮌ驐\\켑寛넆T=tQ㭤L연@脸삯e-:⩼u㎳VQ㋱襗ຓ<Ⅶ䌸cML3+\u001e_C)r\\9+Jn\\Pﺔ8蠱檾萅Pq鐳话T䄐I": -1.80683891195530061E18, "ᷭዻU~ཷsgSJ`᪅'%㖔n5픆桪砳峣3獮枾䌷⊰呀": { "Ş੉䓰邟自~X耤pl7间懑徛s첦5ਕXexh⬖鎥᐀nNr(J컗|ૃF\"Q겮葲놔엞^겄+㈆话〾희紐G'E?飕1f❼텬悚泬먐U睬훶Qs": false, "(\u20dag8큽튣>^Y{뤋.袊䂓;_g]S\u202a꽬L;^'#땏bႌ?C緡<䝲䲝断ꏏ6\u001asD7IK5Wxo8\u0006p弊⼂ꯍ扵\u0003`뵂픋%ꄰ⫙됶l囏尛+䗅E쟇\\": [ true, { "\n鱿aK㝡␒㼙2촹f;`쾏qIࡔG}㝷䐍瓰w늮*粅9뒪ㄊCj倡翑閳R渚MiUO~仨䜶RꙀA僈㉋⦋n{㖥0딿벑逦⥻0h薓쯴Ꝼ": [ 5188716534221998369, 2579413015347802508, 9.010794400256652E-21, -6.5327297761238093E17, 1.11635352494065523E18, -6656281618760253655, { "": ")?", "TWKLꑙ裑꺔UE俸塑炌Ũ᜕-o\"徚#": {"M/癟6!oI51ni퐚=댡>xꍨ\u0004 ?": { "皭": {"⢫䋖>u%w잼<䕏꘍P䋵$魋拝U䮎緧皇Y훂&|羋ꋕ잿cJ䨈跓齳5\u001a삱籷I꿾뤔S8㌷繖_Yឯ䲱B턼O歵F\\l醴o_欬6籏=D": [ false, true, {"Mt|ꏞD|F궣MQ뵕T,띺k+?㍵i": [ 7828094884540988137, false, { "!༦鯠,&aﳑ>[euJꏽ綷搐B.h": -7648546591767075632, "-n켧嘰{7挐毄Y,>❏螵煫乌pv醑Q嶚!|⌝責0왾덢ꏅ蛨S\\)竰'舓Q}A釡5#v": 3344849660672723988, "8閪麁V=鈢1녈幬6棉⪮둌\u207d᚛驉ꛃ'r䆉惏ै|bἧﺢᒙ<=穊强s혧eꮿ慩⌡ 
\\槳W븧J檀C,ᘉ의0俯퀉M;筷ࣴ瓿{늊埂鄧_4揸Nn阼Jੵ˥(社": true, "o뼀vw)4A뢵(a䵢)p姃뛸\u000fK#KiQp\u0005ꅍ芅쏅": null, "砥$ꥸ┇耽u斮Gc{z빔깎밇\\숰\u001e괷各㶇쵿_ᴄ+h穢p촀Ნ䃬z䝁酳ӂ31xꔄ1_砚W렘G#2葊P ": [ -3709692921720865059, null, [ 6669892810652602379, -135535375466621127, "뎴iO}Z? 馢녱稹ᄾ䐩rSt帤넆&7i騏멗畖9誧鄜'w{Ͻ^2窭외b㑎粖i矪ꦨ탪跣)KEㆹ\u0015V8[W?⽉>'kc$䨘ᮛ뉻٬M5", 1.10439588726055846E18, false, -4349729830749729097, null, [ false, "_蠢㠝^䟪/D녒㡋ỎC䒈판\u0006એq@O펢%;鹐쏌o戥~A[ꡉ濽ỳ&虃᩾荣唙藍茨Ig楡꒻M窓冉?", true, 2.17220752996421728E17, -5079714907315156164, -9.960375974658589E-20, "ᾎ戞༒", true, false, [[ "ⶉᖌX⧕홇)g엃⹪x뚐癟\u0002", -5185853871623955469, { "L㜤9ợㇶK鐰⋓V뽋˖!斫as|9"፬䆪?7胜&n薑~": -2.11545634977136992E17, "O8뀩D}캖q萂6༣㏗䈓煮吽ਆᎼDᣘ폛;": false, "YTᡅ^L㗎cbY$pᣞ縿#fh!ꘂb삵玊颟샞ဢ$䁗鼒몁~rkH^:닮먖츸륈⪺쒉砉?㙓扫㆕꣒`R䢱B酂?C뇞<5Iޚ讳騕S瞦z": null, "\\RB?`mG댵鉡幐物䵎有5*e骄T㌓ᛪ琾駒Ku\u001a[柆jUq8⋈5鿋츿myﻗ?雍ux঴?": 5828963951918205428, "n0晅:黯 xu씪^퓞cB㎊ᬍ⺘٤փ~B岚3㥕擄vᲂ~F?C䶖@$m~忔S왖㲚?챴⊟W#벌{'㰝I䝠縁s樘\\X뢻9핡I6菍ㄛ8쯶]wॽ0L\"q": null, "x增줖j⦦t䏢᎙㛿Yf鼘~꫓恄4惊\u209c": "oOhbᤃ᛽z&Bi犑\\3B㩬劇䄑oŁ쨅孥멁ຖacA㖫借㞝vg싰샂㐜#譞⢤@k]鋰嘘䜾L熶塥_<\/⍾屈ﮊ_mY菹t뙺}Ox=w鮮4S1ꐩמּ'巑", "㗓蟵ꂾe蠅匳(JP䗏෸\u0089耀왲": [{ "ᤃ㵥韎뤽\r?挥O쯡⇔㞚3伖\u0005P⋪\"D궣QLn(⚘罩䩢Ŏv䤘尗뼤됛O淽鋋闚r崩a{4箙{煷m6〈": { "l곺1L": { "T'ਤ?砅|੬Km]䄩\"(࿶<\/6U爢䫈倔郴l2㴱^줣k'L浖L鰄Rp今鎗⒗C얨M훁㡧ΘX粜뫈N꤇輊㌻켑#㮮샶-䍗룲蠝癜㱐V>=\\I尬癤t=": 7648082845323511446, "鋞EP:<\/_`ၧe混ㇹBd⯢㮂驋\\q碽饩跓྿ᴜ+j箿렏㗑yK毢宸p謹h䦹乕U媣\\炤": [[ "3", [ true, 3.4058271399411134E-20, true, "揀+憱f逮@먻BpW曉\u001a㣐⎊$n劈D枤㡞좾\u001aᛁ苔౩闝1B䷒Ṋ݋➐ꀞꐃ磍$t੤_:蘺⮼(#N", 697483894874368636, [ "vᘯ锴)0訶}䳅⩚0O壱韈ߜ\u0018*U鍾䏖=䧉뽑单휻ID쿇嘗?ꌸῬ07", -5.4858784319382006E18, 7.5467775182251151E18, -8911128589670029195, -7531052386005780140, null, [ null, true, [[{ "1欯twG<\/Q:0怯押殃탷聫사<ỗꕧ蚨䡁nDꌕ\u001c녬~蓩鲃g儊>ꏡl㻿/⑷*챳6㻜W毤緛ﹺᨪ4\u0013뺚J髬e3쳸䘦伧?恪&{L掾p+꬜M䏊d娘6": { "2p첼양棜h䜢﮶aQ*c扦v︥뮓kC寵횂S銩&ǝ{O*य़iH`U큅ࡓr䩕5ꄸ?`\\᧫?ᮼ?t〟崾훈k薐ì/iy꤃뵰z1<\/AQ#뿩8jJ1z@u䕥": 1.82135747285215155E18, "ZdN &=d년ᅆ'쑏ⅉ:烋5&៏ᄂ汎来L㯄固{钧u\\㊏튚e摑&t嗄ꖄUb❌?m䴘熚9EW": [{ "ଛ{i*a(": -8.0314147546006822E17, "⫾ꃆY\u000e+W`௸ \"M뒶+\\뷐lKE}(NT킶Yj選篒쁶'jNQ硾(똡\\\"逌ⴍy? 
IRꜘ὞鄬﨧:M\\f⠋Cꚜ쫊ᚴNV^D䕗ㅖἔIao꿬C⍏8": [ 287156137829026547, { "H丞N逕⯲": {"": { "7-;枮阕梒9ᑄZ": [[[[ null, { "": [[[[ -7.365909561486078E-19, 2948694324944243408, null, [ true, "荒\"并孷䂡쵼9o䀘F\u0002龬7⮹Wz%厖/*? a*R枈㌦됾g뒠䤈q딄㺿$쮸tᶎ릑弣^鏎<\/Y鷇驜L鿽<\/춋9Mᲆឨ^<\/庲3'l낢", "c鮦\u001b두\\~?眾ಢu݆綑෪蘛轋◜gȃ<\/ⴃcpkDt誩܅\"Y", [[ null, null, [ 3113744396744005402, true, "v(y", { "AQ幆h쾜O+꺷铀ꛉ練A蚗⼺螔j㌍3꽂楎䥯뎸먩?": null, "蠗渗iz鱖w]擪E": 1.2927828494783804E-17, "튷|䀭n*曎b✿~杤U]Gz鄭kW|㴚#㟗ഠ8u擨": [[ true, null, null, {"⾪壯톽g7?㥜ώQꑐ㦀恃㧽伓\\*᧰閖樧뢇赸N휶䎈pI氇镊maᬠ탷#X?A+kНM ༑᩟؝?5꧎鰜ṚY즫궔 =ঈ;ﳈ?*s|켦蜌wM笙莔": [ null, -3808207793125626469, [ -469910450345251234, 7852761921290328872, -2.7979740127017492E18, 1.4458504352519893E-20, true, "㽙깹?먏䆢:䴎ۻg殠JBTU⇞}ꄹꗣi#I뵣鉍r혯~脀쏃#釯:场:䔁>䰮o'㼽HZ擓௧nd", [ 974441101787238751, null, -2.1647718292441327E-19, 1.03602824249831488E18, [ null, 1.0311977941822604E-17, false, true, { "": -3.7019778830816707E18, "E峾恆茍6xLIm縂0n2视֯J-ᤜz+ᨣ跐mYD豍繹⹺䊓몓ﴀE(@詮(!Y膽#᎙2䟓섣A䈀㟎,囪QbK插wcG湎ꤧtG엝x⥏俎j'A一ᯥ뛙6ㅑ鬀": 8999803005418087004, "よ殳\\zD⧅%Y泥簳Uꈩ*wRL{3#3FYHା[d岀䉯T稉駅䞘礄P:闈W怏ElB㤍喬赔bG䠼U଄Nw鰯闀楈ePsDꥷ꭬⊊": [ 6.77723657904486E-20, null, [ "ཚ_뷎꾑蹝q'㾱ꂓ钚蘞慵렜떆`ⴹ⎼櫯]J?[t9Ⓢ !컶躔I᮸uz>3a㠕i,錃L$氰텰@7녫W㸮?羧W뇧ꃞ,N鋮숪2ɼ콏┍䁲6", "&y?뢶=킕올Za惻HZk>c\u20b58i?ꦶcfBv잉ET9j䡡", "im珊Ճb칧校\\뼾쯀", 9.555715121193197E-20, true, { "<㫚v6腓㨭e1㕔&&V∌ᗈT奄5Lጥ>탤?튣瑦㳆ꉰ!(ᙪ㿬擇_n쌯IMΉ㕨␰櫈ᱷ5풔蟹&L.첽e鰷쯃劼﫭b#ﭶ퓀7뷄Wr㢈๧Tʴશ㶑澕鍍%": -1810142373373748101, "fg晌o?߲ꗄ;>C>?=鑰監侯Kt굅": true, "䫡蓺ꑷ]C蒹㦘\"1ః@呫\u0014NL䏾eg呮፳,r$裢k>/\\?ㄤᇰﻛ쉕1஥'Ċ\" \\_?쨔\"ʾr: 9S䘏禺ᪧꄂ㲄", [[{ "*硙^+E쌺I1䀖ju?:⦈Ꞓl๴竣迃xKC/饉:\fl\"XTFᄄ蟭,芢<\/骡軺띜hꏘ\u001f銿<棔햳▨(궆*=乥b8\\媦䷀뫝}닶ꇭ(Kej䤑M": [{ "1Ꮼ?>옿I╅C<ގ?ꊌ冉SV5A㢊㶆z-๎玶绢2F뵨@㉌뀌o嶔f9-庒茪珓뷳4": null, ";lᰳ": "CbB+肻a䄷苝*/볳+/4fq=㰁h6瘉샴4铢Y骐.⌖@哼猎㦞+'gꋸ㒕ߤ㞑(䶒跲ti⑴a硂#No볔", "t?/jE幸YHT셵⩎K!Eq糦ꗣv刴w\"l$ο:=6:移": { "z]鑪醊嫗J-Xm銌翁絨c里됏炙Ep㣋鏣똼嚌䀓GP﹖cmf4鹭T䅿꣭姧␸wy6ꦶ;S&(}ᎧKxᾂQ|t뻳k\"d6\"|Ml췆hwLt꼼4$&8Պ褵婶鯀9": {"嵃닢ᒯ'd᧫䳳#NXe3-붋鸿ଢ떓%dK\u0013䲎ꖍYV.裸R⍉rR3蟛\\:젯:南ĺLʆ넕>|텩鴷矔ꋅⒹ{t孶㓑4_": [ true, null, [ false, "l怨콈lᏒ", { "0w䲏嬧-:`䉅쉇漧\\܂yㄨb%㽄j7ᦶ涶<": 3.7899452730383747E-19, "ꯛTẀq纤q嶏V⿣?\"g}ი艹(쥯B T騠I=仵및X": {"KX6颠+&ᅃ^f畒y[": { 
"H?뱜^?꤂-⦲1a㋞&ꍃ精Ii᤾챪咽쬘唂쫷<땡劈훫놡o㥂\\ KⴙD秼F氮[{'좴:례晰Iq+I쭥_T綺砸GO煝䟪ᚪ`↹l羉q쐼D꽁ᜅ훦: vUV": true, "u^yﳍ0㱓#[y뜌앸ꊬL㷩?蕶蘾⻍KӼ": -7931695755102841701, "䤬轉車>\u001c鴵惋\"$쯃྆⇻n뽀G氠S坪]ಲꨍ捇Qxኻ椕駔\\9ࣼ﫻읜磡煮뺪ᶚ볝l㕆t+sζ": [[[ true, false, [ null, 3363739578828074923, true, { "\"鸣詩 볰㑵gL㯦῅춝旫}ED辗ﮈI쀤-ꧤ|㠦Z\"娑ᕸ4爏騍㣐\"]쳝Af]茛⬻싦o蚁k䢯䩐菽3廇喑ޅ": 4.5017999150704666E17, "TYႇ7ʠ值4챳唤~Zo&ݛ": false, "`塄J袛㭆끺㳀N㺣`꽐嶥KﯝSVᶔ∲퀠獾N딂X\"ᤏhNﬨvI": {"\u20bb㭘I䖵䰼?sw䂷쇪](泒f\"~;꼪Fԝsᝦ": {"p,'ꉂ軿=A蚶?bƉ㏵䅰諬'LYKL6B깯⋩겦뎙(ᜭ\u0006噣d꾆㗼Z;䄝䚔cd<情@䞂3苼㸲U{)<6&ꩻ钛\u001au〷N숨囖愙j=BXW욕^x芜堏Ῑ爂뛷꒻t✘Q\b": [[ "籛&ଃ䩹.ꃩ㦔\\C颫#暪&!勹ꇶ놽攺J堬镙~軌C'꾖䣹㮅岃ᙴ鵣", 4.317829988264744E15, 6.013585322002147E-20, false, true, null, null, -3.084633632357326E-20, false, null, { "\"짫愔昻 X\"藣j\"\"먁ཅѻ㘤㬯0晲DU꟒㸃d벀윒l䦾c੻*3": null, "谈Wm陧阦咟ฯ歖擓N喴㋐銭rCCnVࢥ^♼Ⅾ젲씗刊S༝+_t赔\\b䚍뉨ꬫ6펛cL䊘᜼<\/澤pF懽&H": [ null, { "W\"HDUuΌ퀟M'P4࿰H똆ⰱﮯ<\/凐蘲\"C鴫ﭒж}ꭩ쥾t5yd诪ﮡ퍉ⴰ@?氐醳rj4I6Qt": 6.9090159359219891E17, "絛ﳛ⺂": {"諰P㗮聦`ZQ?ꫦh*റcb⧱}埌茥h{棩렛툽o3钛5鮁l7Q榛6_g)ὄ\u0013kj뤬^爖eO4Ⱈ槞鉨ͺ订%qX0T썗嫷$?\\\"봅늆'%": [ -2.348150870600346E-19, [[ true, -6619392047819511778, false, [[ -1.2929189982356161E-20, 1.7417192219309838E-19, {"?嵲2࿐2\u0001啑㷳c縯": [ null, [ false, true, 2578060295690793218, { "?\"殃呎#㑑F": true, "}F炊_殛oU헢兔Ꝉ,赭9703.B数gTz3⏬": { "5&t3,햓Mݸᵣ㴵;꣫䩍↳#@뫷䠅+W-ࣇzᓃ鿕ಔ梭?T䮑ꥬ旴]u뫵막bB讍:왳둛lEh=숾鱠p咐$짏#?g⹷ᗊv㷵.斈u頻\u0018-G.": "뽙m-ouࣤ஫牷\"`Ksꕞ筼3HlȨvC堈\"I]㖡玎r먞#'W賜鴇k'c룼髋䆿飉㗆xg巤9;芔cጐ/ax䊨♢큓r吓㸫೼䢗da᩾\"]屣`", ":M딪<䢥喠\u0013㖅x9蕐㑂XO]f*Q呰瞊吭VP@9,㨣 D\\穎vˤƩs㜂-曱唅L걬/롬j㈹EB8g<\/섩o渀\"u0y&룣": ">氍緩L/䕑돯Ꟙ蕞^aB뒣+0jK⪄瑨痜LXK^힦1qK{淚t츔X:Vm{2r獁B뾄H첚7氥?쉟䨗ꠂv팳圎踁齀\\", "D彤5㢷Gꪻ[lㄆ@὜⓰絳[ଃ獽쮹☒[*0ꑚ㜳": 9022717159376231865, "ҖaV銣tW+$魿\u20c3亜~뫡ᙰ禿쨽㏡fṼzE/h": "5臐㋇Ჯ쮺? 
昨탰Wム밎#'\"崲钅U?幫뺀⍾@4kh>騧\\0ҾEV=爐͌U捀%ꉼ 㮋<{j]{R>:gԩL\u001c瀈锌ﯲﳡꚒ'⫿E4暍㌗뵉X\"H᝜", "ᱚגּ;s醒}犍SἿ㦣&{T$jkB\\\tḮ앾䤹o<避(tW": "vb⯽䴪䮢@|)", "⥒퐁껉%惀뗌+녣迺顀q條g⚯i⤭룐M琹j̈́⽜A": -8385214638503106917, "逨ꊶZ<\/W⫟솪㎮ᘇb?ꠔi\"H㧺x෷韒Xꫨฟ|]窽\u001a熑}Agn?Mᶖa9韲4$3Ỵ^=쏍煤ፐ돷2䣃%鷠/eQ9頸쥎", 2398360204813891033, false, 3.2658897259932633E-19, null, "?ꚃ8Nn㞷幵d䲳䱲뀙ꪛQ瑓鎴]䩋-鰾捡䳡??掊", false, -1309779089385483661, "ᦲxu_/yecR.6芏.ᜇ過 ~", -5658779764160586501, "쒌:曠=l썜䢜wk#s蕚\"互㮉m䉤~0듐䋙#G;h숄옥顇෤勹(C7㢅雚㐯L⠅VV簅<", null, -4.664877097240962E18, -4.1931322262828017E18, { ",": { "v㮟麑䄠뤵g{M띮.\u001bzt뢜뵡0Ǥ龍떟Ᾰ怷ϓRT@Lꀌ樂U㏠⾕e扉|bJg(뵒㠶唺~ꂿ(땉x⻫싉쁊;%0鎻V(o\f,N鏊%nk郼螺": -1.73631993428376141E18, "쟧摑繮Q@Rᕾ㭚㾣4隅待㓎3蒟": [ 4971487283312058201, 8973067552274458613, { "`a揙ᣗ\u0015iBo¸": 4.3236479112537999E18, "HW&퉡ぁ圍Y?瑡Qy훍q!帰敏s舠㫸zꚗaS歲v`G株巷Jp6킼 (귶鍔⾏⡈>M汐㞍ቴ꙲dv@i㳓ᇆ?黍": [ null, 4997607199327183467, "E㻎蠫ᐾ高䙟蘬洼旾﫠텛㇛?'M$㣒蔸=A_亀绉앭rN帮", null, [{ "Eᑞ)8餧A5u&㗾q?": [ -1.969987519306507E-19, null, [ 3.42437673373841E-20, true, "e걷M墁\"割P␛퍧厀R䱜3ﻴO퓫r﹉⹊", [ -8164221302779285367, [ true, null, "爘y^-?蘞Ⲽꪓa␅ꍨ}I", 1.4645984996724427E-19, [{ "tY좗⧑mrzﺝ㿥ⴖ᥷j諅\u0000q賋譁Ꞅ⮱S\nࡣB/큃굪3Zɑ复o<\/;롋": null, "彟h浠_|V4䦭Dᙣ♞u쿻=삮㍦\u001e哀鬌": [{"6횣楠,qʎꗇ鎆빙]㱭R굋鈌%栲j分僅ペ䇰w폦p蛃N溈ꡐꏀ?@(GI뉬$ﮄ9誁ꓚ2e甸ڋ[䁺,\u0011\u001cࢃ=\\+衪䷨ᯕ鬸K": [[ "ㅩ拏鈩勥\u000etgWVXs陂規p狵w퓼{뮵_i\u0002ퟑႢ⬐d6鋫F~챿搟\u0096䚼1ۼ칥0꣯儏=鋷牋ⅈꍞ龐", -7283717290969427831, true, [ 4911644391234541055, { "I鈒첽P릜朸W徨觘-Hᎄ퐟⓺>8kr1{겵䍃〛ᬡ̨O귑o䝕'쿡鉕p5": "fv粖RN瞖蛐a?q꤄\u001d⸥}'ꣴ犿ꦼ?뤋?鵆쥴덋䡫s矷̄?ඣ/;괱絢oWfV<\/\u202cC,㖦0䑾%n賹g&T;|lj_欂N4w", "짨䠗;䌕u i+r๏0": [{"9䥁\\఩8\"馇z䇔<\/ႡY3e狚쐡\"ุ6ﰆZ遖c\"Ll:ꮾ疣<\/᭙O◌납୕湞9⡳Und㫜\u0018^4pj1;䧐儂䗷ୗ>@e톬": { "a⑂F鋻Q螰'<퇽Q贝瀧{ᘪ,cP&~䮃Z?gI彃": [ -1.69158726118025933E18, [ "궂z簽㔛㮨瘥⤜䛖Gℤ逆Y⪾j08Sn昞ꘔ캻禀鴚P謦b{ꓮmN靐Mᥙ5\"睏2냑I\u0011.L&=?6ᄠ뻷X鸌t刑\"#z)o꫚n쳟줋", null, 7517598198523963704, "ኑQp襟`uᩄr方]*F48ꔵn俺ሙ9뇒", null, null, 6645782462773449868, 1219168146640438184, null, { ")ယ넌竀Sd䰾zq⫣⏌ʥ\u0010ΐ' |磪&p牢蔑mV蘸૰짬꺵;K": [ -7.539062290108008E-20, [ true, false, null, true, 6574577753576444630, [[ 1.2760162530699766E-19, [ null, [ "顊\\憎zXB,", [{ 
"㇆{CVC9-MN㜋ઘR눽#{h@ퟨ!鼚׼XOvXS\u0017ᝣ=cS+梽៲綆16s덽휐y屬?ᇳG2ᴭ\u00054쫖y룇nKcW̭炦s/鰘ᬽ?J|퓀髣n勌\u0010홠P>j": false, "箴": [ false, "鍞j\"ꮾ*엇칬瘫xṬ⭽쩁䃳\"-⋵?ᦽ댎Ĝ": true, "Pg帯佃籛n㔠⭹࠳뷏≻࿟3㞱!-쒾!}쭪䃕!籿n涻J5ਲ਼yvy;Rኂ%ᔡጀ裃;M⣼)쵂쑈": 1.80447711803435366E18, "ꈑC⡂ᑆ㤉壂뎃Xub<\/쀆༈憓ق쨐ק\\": [ 7706977185172797197, {"": {"K╥踮砆NWࡆFy韣7ä밥{|紒︧䃀榫rᩛꦡTSy잺iH8}ퟴ,M?Ʂ勺ᴹ@T@~꾂=I㙕뾰_涀쑜嫴曣8IY?ҿo줫fऒ}\\S\"ᦨ뵼#nDX": { "♘k6?଱癫d68?㽚乳䬳-V顷\u0005蝕?\u0018䞊V{邾zじl]雏k臤~ൖH뒐iꢥ]g?.G碄懺䔛pR$䅒X觨l봜A刊8R梒',}u邩퉕?;91Ea䈈믁G⊶芔h袪&廣㺄j;㡏綽\u001bN頸쳘橆": -2272208444812560733, "拑Wﵚj鵼駳Oࣿ)#㾅顂N傓纝y僱栜'Bꐍ-!KF*ꭇK¦?䈴^:啤wG逭w᧯": "xᣱmYe1ۏ@霄F$ě꧘푫O䤕퀐Pq52憬ꀜ兴㑗ᡚ?L鷝ퟐ뭐zJꑙ}╆ᅨJB]\"袌㺲u8䯆f", "꿽၅㔂긱Ǧ?SI": -1669030251960539193, "쇝ɨ`!葎>瞺瘡驷錶❤ﻮ酜=": -6961311505642101651, "?f7♄꫄Jᡔ훮e읇퍾፣䭴KhखT;Qty}O\\|뫁IῒNe(5惁ꥶㆷY9ﮡ\\ oy⭖-䆩婁m#x봉>Y鈕E疣s驇↙ᙰm<": {"퉻:dꂁ&efᅫ쫢[\"돈늖꺙|Ô剐1͖-K:ʚ᭕/;쏖㷛]I痐职4gZ4⍜kเꛘZ⥺\\Bʫᇩ鄨魢弞&幟ᓮ2̊盜", -9006004849098116748, -3118404930403695681, { "_彃Y艘-\"Xx㤩㳷瑃?%2䐡鵛o귵옔夘v*탋职&㳈챗|O钧": [ false, "daꧺdᗹ羞쯧H㍤鄳頳<型孒ン냆㹀f4㹰\u000f|C*ሟ鰠(O<ꨭ峹ipຠ*y೧4VQ蔔hV淬{?ᵌEfrI_", "j;ꗣ밷邍副]ᗓ", -4299029053086432759, -5610837526958786727, [ null, [ -1.3958390678662759E-19, { "lh좈T_믝Y\"伨\u001cꔌG爔겕ꫳ晚踍⿻읐T䯎]~e#฽燇\"5hٔ嶰`泯r;ᗜ쮪Q):/t筑,榄&5懶뎫狝(": [{ "2ፁⓛ]r3C攟וּ9賵s⛔6'ஂ|\"ⵈ鶆䐹禝3\"痰ࢤ霏䵩옆䌀?栕r7O簂Isd?K᫜`^讶}z8?z얰T:X倫⨎ꑹ": -6731128077618251511, "|︦僰~m漿햭\\Y1'Vvخ굇ቍ챢c趖": [null] }], "虌魿閆5⛔煊뎰㞤ᗴꥰF䮥蘦䂪樳-K᝷-(^\u20dd_": 2.11318679791770592E17 } ] ] ]}, "묗E䀳㧯᳀逞GMc\b墹㓄끖Ơ&U??펌鑍 媋k))ᄊ": null, "묥7콽벼諌J_DɯﮪM殴䣏,煚ྼ`Y:씧<\/⩫%yf䦀!1Ჶk춎Q米W∠WC跉鬽*ᛱi㴕L꘻ꀏ쓪\"_g鿄'#t⽙?,Wg㥖|D鑆e⥏쪸僬h鯔咼ඡ;4TK聎졠嫞" } ] ] } ] ] ]}} } ]} }, "뿋뀾淣截䔲踀&XJ펖꙯^Xb訅ꫥgᬐ>棟S\"혧騾밫겁7-": "擹8C憎W\"쵮yR뢩浗絆䠣簿9䏈引Wcy䤶孖ꯥ;퐌]輩䍐3@{叝 뽸0ᡈ쵡Ⲇ\u001dL匁꧐2F~ݕ㪂@W^靽L襒ᦘ~沦zZ棸!꒲栬R" } ] ], "Z:덃൛5Iz찇䅄駠㭧蓡K1": "e8᧤좱U%?ⵇ䯿鿝\u0013縮R∱骒EO\u000fg?幤@֗퉙vU`", "䐃쪈埽້=Ij,쭗쓇చ": false }]}} ] } ]} } ] ] ], "咰긖VM]᝼6䓑쇎琺etDҌ?㞏ꩄ퇫밉gj8蠃\"⩐5䛹1ࣚ㵪": "ക蹊?⎲⧘⾚̀I#\"䈈⦞돷`wo窭戕෱휾䃼)앷嵃꾞稧,Ⴆ윧9S?೗EMk3Მ3+e{⹔Te驨7䵒?타Ulg悳o43" } ], "zQᤚ纂땺6#ٽ﹧v￿#ࠫ휊冟蹧텈ꃊʆ?&a䥯De潝|쿓pt瓞㭻啹^盚2Ꝋf醪,얏T窧\\Di䕎谄nn父ꋊE": -2914269627845628872, "䉩跐|㨻ᷢ㝉B{蓧瞸`I!℄욃힕#ೲᙾ竛ᔺCjk췒늕貭词\u0017署?W딚%(pꍁ⤼띳^=on뺲l䆼bzrﳨ[&j狸䠠=ᜑꦦ\u2061յnj=牲攑)M\\龏": false, 
"뎕y絬᫡⥮Ϙᯑ㌔/NF*˓.,QEzvK!Iwz?|쥾\"ꩻL꼗Bꔧ賴緜s뉣隤茛>ロ?(?^`>冺飒=噸泥⺭Ᲊ婓鎔븜z^坷裮êⓅ໗jM7ﶕ找\\O": 1.376745434746303E-19 }, "䐛r滖w㏤,|Nዜ": false } ]], "@꿙?薕尬 gd晆(띄5躕ﻫS蔺4)떒錸瓍?~": 1665108992286702624, "w믍nᏠ=`঺ᅥC>'從됐槷䤝眷螄㎻揰扰XᅧC贽uჍ낟jKD03T!lDV쀉Ӊy뢖,袛!终캨G?鉮Q)⑗1쾅庅O4ꁉH7?d\u0010蠈줘월ސ粯Q!낇껉6텝|{": null, "~˷jg쿤촖쉯y": -5.5527605669177098E18, "펅Wᶺzꐆと푭e?4j仪열[D<鈑皶婆䵽ehS?袪;HꍨM뗎ば[(嗏M3q퍟g4y╸鰧茀[Bi盤~﫝唎鋆彺⦊q?B4쉓癚O洙킋툈䶯_?ퟲ": null } ] ]] ]], "꟱Ԕ㍤7曁聯ಃ錐V䷰?v㪃૦~K\"$%请|ꇹn\"k䫛㏨鲨\u2023䄢\u0004[︊VJ?䶟ាꮈ䗱=깘U빩": -4863152493797013264 } ]}]} ] }}} ], "쏷쐲۹퉃~aE唙a챑,9㮹gLHd'䔏|킗㍞䎥&KZYT맵7䥺Nⱳ同莞鿧w\\༌疣n/+ꎥU\"封랾○ퟙAJᭌ?9䛝$?驔9讐짘魡T֯c藳`虉C읇쐦T" } ], "谶개gTR￐>ၵ͚dt晑䉇陏滺}9㉸P漄": -3350307268584339381 }] ] ] ]] ] ], "0y꟭馋X뱔瑇:䌚￐廿jg-懲鸭䷭垤㒬茭u賚찶ಽ+\\mT땱\u20821殑㐄J쩩䭛ꬿNS潔*d\\X,壠뒦e殟%LxG9:摸": 3737064585881894882, "풵O^-⧧ⅶvѪ8廸鉵㈉ר↝Q㿴뺟EႳvNM:磇>w/៻唎뷭୥!냹D䯙i뵱貁C#⼉NH6`柴ʗ#\\!2䂗Ⱨf?諳.P덈-返I꘶6?8ꐘ": -8934657287877777844, "溎-蘍寃i诖ര\"汵\"\ftl,?d⼡쾪⺋h匱[,෩I8MҧF{k瓿PA'橸ꩯ綷퉲翓": null } ] ], "ោ係؁<元": 1.7926963090826924E-18 }}] } ] ]]}] }] ] ] ] ], "ጩV<\"ڸsOᤘ": 2.0527167903723048E-19 }] ]} ] ]], "∳㙰3젴p᧗䱙?`yZA8Ez0,^ᙛ4_0븢\u001ft:~䎼s.bb룦明yNP8弆C偯;⪾짍'蕴뮛": -6976654157771105701, "큵ꦀ\\㇑:nv+뒤燻䀪ﴣ﷍9ᚈ኷K㚊誦撪䚛,ꮪxሲ쳊\u0005HSf?asg昱dqꬌVꙇ㼺'k*'㈈": -5.937042203633044E-20 } ] }], "?}\u20e0],s嶳菋@#2u쒴sQS䩗=ꥮ;烌,|ꘔ䘆": "ᅩ영N璠kZ먕眻?2ቲ芋眑D륟渂⸑ﴃIRE]啗`K'" }}, "쨀jmV賂ﰊ姐䂦玞㬙ᏪM᪟Վ씜~`uOn*ॠ8\u000ef6??\\@/?9見d筜ﳋB|S䝬葫㽁o": true }, "즛ꄤ酳艚␂㺘봿㎨iG৕ࡿ?1\"䘓您\u001fSኝ⺿溏zៀ뻤B\u0019?윐a䳵᭱䉺膷d:<\/": 3935553551038864272 } ] ]} ]] ]] ]} } ] } ]]}}, "᥺3h↛!ꋰy\"攜(ெl䪕oUkc1A㘞ᡲ촾ᣫ<\/䒌E㛝潨i{v?W౾H\\RჅpz蝬R脾;v:碽✘↯삞鷱o㸧瑠jcmK7㶧뾥찲n": true, "ⶸ?x䊺⬝-䰅≁!e쩆2ꎿ准G踌XXᩯ1߁}0?.헀Z馟;稄\baDꟹ{-寪⚈ꉷ鮸_L7ƽᾚ<\u001bጨA䧆송뇵⨔\\礍뗔d设룱㶉cq{HyぱR㥽吢ſtp": -7985372423148569301, "緫#콮IB6<\/=5Eh礹\t8럭@饹韠r㰛斣$甝LV췐a갵'请o0g:^": "䔨(.", "띳℡圤pン௄ĝ倧訜B쁟G䙔\"Sb⓮;$$▏S1J뢙SF|赡g*\"Vu䲌y": "䪈&틐),\\kT鬜1풥;뷴'Zေ䩹@J鞽NぼM?坥eWb6榀ƩZڮ淽⺞삳煳xჿ絯8eⶍ羷V}ჿ쎱䄫R뱃9Z>'\u20f1ⓕ䏜齮" } ] ]]] }} } ] ]}, "펮b.h粔폯2npX詫g錰鷇㇒<쐙S値bBi@?镬矉`剔}c2壧ଭfhY깨R()痩⺃a\\⍔?M&ﯟ<劜꺄멊ᄟA\"_=": null }, "~潹Rqn榢㆓aR鬨侅?䜑亡V_翅㭔(䓷w劸ၳDp䀅<\/ﰎ鶊m䵱팱긽ꆘ긓准D3掱;o:_ќ)껚콥8곤d矦8nP倥ꃸI": null, "뾎/Q㣩㫸벯➡㠦◕挮a鶧⋓偼\u00001뱓fm覞n?㛅\"": 2.8515592202045408E17 }], 
",": -5426918750465854828, "2櫫@0柡g䢻/gꆑ6演&D稒肩Y?艘/놘p{f투`飷ᒉ챻돎<늛䘍ﴡ줰쫄": false, "8(鸑嵀⵹ퟡ<9㣎Tߗ┘d슒ل蘯&㠦뮮eࠍk砝g 엻": false, "d-\u208b?0ﳮ嵙'(J`蔿d^踅⤔榥\\J⵲v7": 6.8002426206715341E17, "ཎ耰큓ꐕ㱷\u0013y=詽I\"盈xm{0쾽倻䉚ષso#鰑/8㸴짯%ꀄ떸b츟*\\鲷礬ZQ兩?np㋄椂榨kc᡹醅3": false, "싊j20": false }]] ]], "俛\u0017n緽Tu뫉蜍鼟烬.ꭠIⰓ\"Ἀ᜾uC쎆J@古%ꛍm뻨ᾀ画蛐휃T:錖㑸ዚ9죡$": true } ] ], "㍵⇘ꦖ辈s}㱮慀밒s`\"㞟j:`i픻Z섫^諎0Ok{켿歁෣胰a2﨤[탳뚬쎼嫭뉮m": 409440660915023105, "w墄#*ᢄ峠밮jLa`ㆪ꺊漓Lで끎!Agk'ꁛ뢃㯐岬D#㒦": false, "ଦPGI䕺L몥罭ꃑ궩﮶#⮈ᢓӢ䚬p7웼臧%~S菠␌힀6&t䳙y㪘냏\\*;鉏ᅧ鿵'嗕pa\"oL쇿꬈Cg": "㶽1灸D⟸䴅ᆤ뉎﷛渤csx 䝔цꬃ锚捬?ຽ+x~꘩uI࡞\u0007栲5呚ẓem?袝\")=㥴䨃pac!/揎Y", "ᷱo\\||뎂몷r篙|#X䦜I#딌媸픕叞RD斳X4t⯩夬=[뭲r=绥jh뷱츝⪘%]⚋܈㖴スH텹m(WO曝劉0~K3c柢Ր㏉着逳~": false, "煽_qb[첑\\륌wE❽ZtCNﭝ+餌ᕜOꛭ": "{ﳾ쉌&s惧ᭁⵆ3䢫;䨞팑꒪흘褀࢖Q䠿V5뭀䎂澻%받u5텸oA⮥U㎦;B䳌wz䕙$ឿ\\௅婺돵⪾퐆\\`Kyौꋟ._\u0006L챯l뇠Hi䧈偒5", "艊佁ࣃ롇䱠爬!*;⨣捎慓q靓|儑ᨋL+迥=6㒺딉6弄3辅J-㕎뛄듘SG㆛(\noAzQꝱ䰩X*ぢO퀌%펠낌mo틮a^<\/F&_눊ᾉ㨦ы4\"8H": 2974648459619059400, "鬙@뎣䫳ၮ끡?){y?5K;TA*k溱䫜J汃ꂯ싔썍\u001dA}룖(<\/^,": false, "몏@QꋦFꊩᒐ뎶lXl垨4^郣|ꮇ;䝴ᝓ}쵲z珖": null } ]]]], ":_=닧弗D䙋暨鏛. 㱻붘䂍J儒&ZK/녩䪜r囁⽯D喠죥7⹌䪥c\u001a\u2076￞妈朹oLk菮F౟覛쐧㮏7T;}蛙2{9\"崓bB<\/⡷룀;즮鿹)丒툃୤뷠5W⊢嶜(fb뭳갣": "E{响1WM" }}, "䘨tjJ驳豨?y輊M*᳑梵瞻઻ofQG瑮e": 2.222802939724948E-19, "䮴=❑➶T෋w䞜\"垦ꃼUt\u001dx;B$뵣䙶E↌艣ᡥ!᧟;䱀[䔯k쬃`੍8饙른熏'2_'袻tGf蒭J땟as꯳╖&啒zWࡇᒫYSᏬ\u0014ℑ첥鈤|cG~Pᓮ\">\"": "ႆl\f7V儊㦬nHꄬꨧC{쐢~C⮃⛓嶦vꄎ1w鰠嘩뿠魄&\"_qMⵖ釔녮ꝇ 㝚{糍J哋 cv?-jkﻯྌ鹑L舟r", "龧葆yB✱H盋夔ﶉ?n*0(": "ꧣኆ㢓氥qZZ酒ຜ)鮢樛)X䣆gTSґG텞k.J圬疝롫쯭z L:\\ྤ@w炋塜쿖ᾳy뢀䶃뱝N䥨㚔勇겁#p", "도畎Q娡\"@S/뼋:䵏!P衅촚fVHQs✜ᐫi㻑殡B䜇%믚k*U#濨낄~": "ꍟዕ쳸ꍈ敋&l妏\u0005憡멗瘌uPgᅪm<\/To쯬锩h뒓k" } ] }], "墥홞r绚<\/⸹ⰃB}<躅\\Y;๑@䔸>韫䜲뱀X뗩鿥쩗SI%ﴞ㳕䛇?<\/\u00018x\\&侂9鋙a[LR㋭W胕)⡿8㞙0JF,}?허d1cDMᐃ␛鄝ⱕ%X)!XQ": "ⳍꗳ=橇a;3t⦾꼑仈ူaᚯ⯋ꕃAs鴷N⍕_䎃ꙎAz\u0016䯷\\<࿫>8q{}キ?ᣰ}'0ᴕ펓B┦lF#趤厃T?㕊#撹圂䆲" }, "܋닐龫論c웑": false, "ㇿ/q\"6-co髨휝C큦#\u001b4~?3䐹E삇<<": 7.600917488140322E-20, "䁝E6?㣖ꃁ间t祗*鑠{ḣV(浾h逇큞=W?ૉ?nꇽ8ꅉຉj으쮺@Ꚅ㰤u]Oyr": "v≁᫸_*όAඤԆl)ۓᦇQ}폠z༏q滚", "ソ᥊/넺I": true }]] ] ] ] ]] }, "䭑Ik攑\u0002QV烄:芩.麑㟴㘨≕": true, "坄꿕C쇻풉~崍%碼\\8\"䬦꣙": null, "欌L圬䅘Y8c(♺2?ON}o椳s宥2䉀eJ%闹r冁O^K諭%凞⺉⡻,掜?$ꥉ?略焕찳㯊艼誜4?\"﯎<゛XፈINT:詓 +": -1.0750456770694562E-19, "獒àc뜭싼ﺳ뎤K`]p隨LtE": null, 
"甙8䵊神EIꩤ鐯ᢀ,ﵮU䝑u疒ử驺䚿≚ഋ梶秓F`覤譐#짾蔀묊4<媍쬦靪_Yzgcࡶ4k紥`kc[Lﮗ簐*I瀑[⾰L殽鑥_mGȠ<\/|囹灠g桰iri": true, "챓ꖙꟻ좝菇ou,嗠0\\jK핻뜠qwQ?ഩ㼕3Y彦b\u009bJ榶N棨f?됦鏖綃6鳵M[OE봨u햏.Ꮁ癜蟳뽲ꩌ뻾rM豈R嗀羫 uDꎚ%": null }, "V傜2<": 7175127699521359521 }], "铫aG切<\/\"ী⊆e<^g࢛)D顝nאַ饼\u008c猪繩嵿ﱚCꡬ㻊g엺A엦\u000f暿_f꿤볝㦕桦`蒦䎔j甬%岝rj 糏": "䚢偎눴Au<4箞7礦Iﱔ坠eȧ䪸u䵁p|逹$嗫쨘ꖾ﷐!胠z寓팢^㨔|u8Nሇe텔ꅦ抷]،鹎㳁#༔繁 ", "낂乕ꃻ볨ϱ-ꇋ㖍fs⿫)zꜦ/K?솞♞ꑌ宭hJ᤭瑥Fu": false, "쟰ぜ魛G\u0003u?`㾕ℾ㣭5螠烶這趩ꖢ:@咕ꐶx뒘느m䰨b痃렐0鳊喵熬딃$摉_~7*ⱦ녯1錾GKhJ惎秴6'H妈Tᧅ窹㺒疄矤铟wላ": null, "쯆q4!3錕㲏ⵆ㇛꘷Z瑩뭆\\◪NH\u001d\\㽰U~㯶<\"쑣낞3ᵤ'峉eꢬ;鬹o꣒木X*長PXᘱu\"䠹n惞": null, "ᅸ祊\"&ꥴCjࢼ﴿?䡉`U效5殼㮞V昽ꏪ#ﺸ\\&t6x꠹盥꣰a[\u001aꪍSpe鎿蠹": -1.1564713893659811E-19 } ]] ] ] ], "羵䥳H,6ⱎ겾|@t\"#햊1|稃 섭)띜=뻔ꡜ???櫎~*ῡ꫌/繣ﻠq": null } ]} ]}, "츤": false }}, "s": 3.7339341963399598E18 } ], "N,I?1+㢓|ࣱ嶃쩥V2\u0012(4EE虪朶$|w颇v步": "~읢~_,Mzr㐫YB溓E淚\"ⅹ䈔ᏺ抙 b,nt5V㐒J檶ꏨ⻔?", "Q껑ꡡ}$넎qH煔惍/ez^!ẳF댙䝌馻剁8": "梲;yt钰$i冄}AL%a j뜐奷걳뚾d꿽*ሬuDY3?뮟鼯뮟w㍪틱V", "o{Q/K O胟㍏zUdꀐm&⨺J舕⾏魸訟㌥[T籨櫉唐킝 aṭ뱫촙莛>碶覆⧬짙쭰ׯdAiH໥벤퐥_恸[ 0e:죃TC弼荎뵁DA:w唵ꣁ": null, "὏樎䵮軧|?౗aWH쩃1 ꅭsu": null } ] }, "勂\\&m鰈J釮=Ⲽ鳋+䂡郑": null, "殣b綊倶5㥗惢⳷萢ᑀ䬄镧M^ﱴ3⣢翣n櫻1㨵}ኯ뗙顖Z.Q➷ꮨ뗇\u0004": "ꔙ䁼>n^[GीA䨟AM琢ᒊS쨲w?d㶣젊嘶纝麓+愣a%気ྞSc됓ᔘ:8bM7Xd8㶑臌]Ꙥ0ꐭ쒙䫣挵C薽Dfⵃ떼᷸", "?紡.셪_෨j\u0013Ox┠$Xᶨ-ᅇo薹-}軫;y毝㪜K㣁?.EV쮱4둽⛻䤜'2盡\u001f60(|e쐰㼎ᦀ㒧-$l@ﻑ坳\u0003䭱响巗WFo5c㧆T턁Y맸♤(": -2.50917882560589088E17 }} ], "侸\\릩.᳠뎠狣살cs项䭩畳H1s瀉븇19?.w骴崖㤊h痠볭㞳㞳䁮Ql怠㦵": "@䟴-=7f", "鹟1x௢+d ;vi䭴FSDS\u0004hꎹ㚍?⒍⦏ў6u,扩@됷Su)Pag휛TᒗV痩!瞏釀ꖞ蘥&ೞ蘐ꭰꞇᝎ": "ah懱Ժ&\u20f7䵅♎඀䞧鿪굛ౕ湚粎蚵ᯋ幌YOE)५襦㊝Y*^\"R+ඈ咷蝶9ꥂ榨艦멎헦閝돶v좛咊E)K㓷ྭr", "搆q쮦4綱켙셁.f4<\/g<籽늷?#蚴픘:fF\u00051㹉뀭.ᰖ풎f֦Hv蔎㧤.!䭽=鞽]음H:?\"-4": 8.740133984938656E-20 }]} } ], "tVKn딩꘥⊾蹓᤹{\u0003lR꼽ᄲQFᅏ傅ﱋ猢⤊ᔁ,E㓒秤nTතv`♛I\u0000]꫔ṞD\"麵c踝杰X&濿또꣹깳౥葂鿎\\aꡨ?": 3900062609292104525 } ], "ਉ샒⊩Lu@S䧰^g": -1.1487677090371648E18, "⎢k⑊꬗yᏫ7^err糎Dt\u000bJ礯확ㆍ沑サꋽe赔㝢^J\u0004笲㿋idra剰-᪉C錇/Ĝ䂾ညS지?~콮gR敉⬹'䧭": 1901472137232418266, "灗k䶥:?촽贍쓉꓈㒸g獘[뵎\\胕?\u0014_榙p.j稶,$`糉妋0>Fᡰly㘽$?": "]ꙛO赎&#㠃돱剳\"<◆>0誉齐_|z|裵씪>ᐌ㼍\"Z[琕}O?G뚇諦cs⠜撺5cu痑U圲\u001c?鴴計l춥/╓哼䄗茏ꮅ뫈댽A돌롖뤫V窗讬sHd&\nOi;_u" } ], "Uﺗ\\Y\\梷䄬~\u0002": null, "k\"Y磓ᗔ휎@U冈<\/w컑)[": false, "曏J蝷⌻덦\u001f㙳s꥓⍟邫P늮쥄c∬ྡྷ舆렮칤Z趣5콡넛A쳨\\뀙骫(棻.*&輛LiIfi{@EA婳KᬰTXT": 
-4.3088230431977587E17 }]} ] ], "곃㲧<\/dఓꂟs其ࡧ&N葶=?c㠤Ჴ'횠숄臼#\u001a~": false } ] ]}] }] }} ], "2f`⽰E쵟>J笂裭!〛觬囀ۺ쟰#桊l鹛ⲋ|RA_Vx፭gE됓h﵀mfỐ|?juTU档[d⢼⺻p濚7E峿": 5613688852456817133 }, "濘끶g忮7㏵殬W팕Q曁 뫰)惃廊5%-蹚zYZ樭ﴷQ锘쯤崫gg": true, "絥ᇑ⦏쒓븣爚H.㗊߄o蘵貆ꂚ(쎔O᥉ﮓ]姨Wꁓ!RMA|o퉢THx轮7M껁U즨'i뾘舯o": "跥f꜃?" }} ], "鷰鹮K-9k;ﰰ?_ݦѷ-ꅣ䩨Zꥱ\"mꠟ屎/콑Y╘2&鸞脇㏢ꀇ࠺ⰼ拾喭틮L꽩bt俸墶 [l/웄\"꾦\u20d3iও-&+\u000fQ+໱뵞": -1.296494662286671E-19 }, "HX੹/⨇୕붷Uﮘ旧\\쾜͔3l鄈磣糂̖䟎Eᐳw橖b῀_딕hu葰窳闹вU颵|染H죶.fP䗮:j䫢\\b뎖i燕ꜚG⮠W-≚뉗l趕": "ଊ칭Oa᡺$IV㷧L\u0019脴셀붿餲햪$迳向쐯켂PqfT\" ?I屉鴼쿕@硙z^鏕㊵M}㚛T젣쓌-W⩐-g%⺵<뮱~빅╴瑿浂脬\u0005왦燲4Ⴭb|D堧 <\/oEQh", "䘶#㥘੐캔f巋ἡAJ䢚쭈ࣨ뫒*mᇊK,ࣺAꑱ\u000bR<\/A\"1a6鵌㯀bh곿w(\"$ꘁ*rಐ趣.d࿩k/抶면䒎9W⊃9": "漩b挋Sw藎\u0000", "畀e㨼mK꙼HglKb,\"'䤜": null }]}] ] ] }] ]} ] ]} ], "歙>駿ꣂ숰Q`J΋方樛(d鱾뼣(뫖턭\u20f9lচ9歌8o]8윶l얶?镖G摄탗6폋폵+g:䱫홊<멀뀿/س|ꭺs걐跶稚W々c㫣⎖": "㣮蔊깚Cꓔ舊|XRf遻㆚︆'쾉췝\\&言", "殭\"cށɨꝙ䞘:嬮e潽Y펪㳅/\"O@ࠗ겴]췖YǞ(t>R\"N?梳LD恭=n氯T豰2R諸#N}*灧4}㶊G䍣b얚": null, "襞<\/啧 B|싞W瓇)6簭鼡艆lN쩝`|펭佡\\間邝[z릶&쭟愱ꅅ\\T᰽1鯯偐栈4̸s윜R7⒝/똽?치X": "⏊躖Cﱰ2Qẫ脐&இ?%냝悊", ",鰧偵셣싹xᎹ힨᯳EṬH㹖9": -4604276727380542356 } } ]]]], "웺㚑xs}q䭵䪠馯8?LB犯zK'os䚛HZ\"L?셎s^㿧㴘Cv2": null }] ] ] ], "Kd2Kv+|z": 7367845130646124107, "ᦂⶨ?ᝢ 祂些ഷ牢㋇操\"腭䙾㖪\\(y4cE뽺ㆷ쫺ᔖ%zfۻ$ў1柦,㶢9r漢": -3.133230960444846E-20, "琘M焀q%㢟f鸯O⣏蓑맕鯊$O噷|)z褫^㢦⠮ꚯ꫞`毕1qꢚ{ĭ䎀বώT\"뱘3G൴?^^of": null } ], "a8V᯺?:ﺃ/8ꉿBq|9啓댚;*i2": null, "cpT瀇H珰Ừpೃi鎪Rr␣숬-鹸ҩ䠚z脚цGoN8入y%趌I┽2ឪЀiJNcN)槣/▟6S숆牟\"箑X僛G殱娇葱T%杻:J諹昰qV쨰": 8331037591040855245 }], "G5ᩜ䄗巢껳": true } }, "Ồ巢ゕ@_譙A`碫鄐㡥砄㠓(^K": "?܃B혢▦@犑ὺD~T⧁|醁;o=J牌9냚⢽㨘{4觍蚔9#$∺\u0016p囅\\3Xk阖⪚\"UzA穕롬✎➁㭒춺C㣌ဉ\"2瓑员ᅽꝶ뫍}꽚ꞇ鶂舟彺]ꍽJC蝧銉", "␆Ě膝\"b-퉐ACR言J謈53~V튥x䜢?ꃽɄY뮩ꚜ": "K/↾e萃}]Bs⾿q룅鷦-膋?m+死^魊镲6", "粡霦c枋AHퟁo礼Ke?qWcA趸㡔ꂏ?\u000e춂8iতᦜ婪\u0015㢼nﵿꍻ!ᐴ関\u001d5j㨻gfῩUK5Ju丝tかTI'?㓏t>⼟o a>i}ᰗ;뤕ܝ": false, "ꄮ匴껢ꂰ涽+䜨B蛹H䛓-k蕞fu7kL谖,'涃V~챳逋穞cT\"vQ쓕ObaCRQ㓡Ⲯ?轭⫦輢墳?vA餽=h䮇킵n폲퉅喙?\"'1疬V嬗Qd灗'Lự": "6v!s믁㭟㣯獃!磸餠ቂh0C뿯봗F鷭gꖶ~コkK<ᦈTt\\跓w㭣횋钘ᆹ듡䑚W䟾X'ꅔ4FL勉Vܴ邨y)2'〚쭉⽵-鞣E,Q.?块", "?(˧쩯@崟吋歄K": null }, "Gc럃녧>?2DYI鴿\\륨)澔0ᔬlx'觔7젘⤡縷螩%Sv׫묈/]↱&S h\u0006歋ᑛxi̘}ひY蔯_醨鯘煑橾8?䵎쨋z儬ꁏ*@츾:": null } } } ] ] ]} }, "HO츧G": 3.694949578823609E17, "QC\u0012(翻曇Tf㷟bGBJ옉53\\嚇ᛎD/\u001b夾၉4\"핀@祎)쫆yD\"i먎Vn㿿V1W᨝䶀": 
-6150931500380982286, "Z㓮P翸鍱鉼K䋞꘺튿⭁Y": -7704503411315138850, "]모开ꬖP븣c霤<[3aΠ\"黁䖖䰑뮋ꤦ秽∼㑷冹T+YUt\"싳F↭䖏&鋌": -2.7231911483181824E18, "tꎖ": -4.9517948741799555E-19, "䋘즊.⬅IꬃۣQ챢ꄑ黐|f?C⾺|兕읯sC鬸섾整腨솷V": "旆柩l쪦sᖸMy㦅울썉瘗㎜檵9ꍂ駓ૉᚿ/u3씅徐拉[Z䞸ࡗ1ꆱ&Q풘?ǂ8\u0011BCDY2볨;鸏": null, "幫 n煥s쁇펇 왊-$C\"衝:\u0014㣯舼.3뙗Yl⋇\"K迎멎[꽵s}9鉳UK8쐥\"掄㹖h㙈!얄સ?Ꜳ봺R伕UTD媚I䜘W鏨蔮": -4.150842714188901E-17, "ﺯ^㄄\b죵@fྉkf颡팋Ꞧ{/Pm0V둳⻿/落韒ꊔᚬ@5螺G\\咸a谆⊪ቧ慷绖?财(鷇u錝F=r၍橢ឳn:^iᴵtD볠覅N赴": null }] }] } ] ]} ]}, "謯?w厓奰T李헗聝ឍ貖o⪇弒L!캶$ᆅ": -4299324168507841322, "뺊奉_垐浸延몏孄Z舰2i$q붿좾껇d▵餏\"v暜Ҭ섁m￴g>": -1.60911932510533427E18 } ] } ] ]], "퉝꺔㠦楶Pꅱ": 7517896876489142899, "": false } ]}, "是u&I狻餼|谖j\"7c됮sסּ-踳鉷`䣷쉄_A艣鳞凃*m⯾☦椿q㎭N溔铉tlㆈ^": 1.93547720203604352E18, "kⲨ\\%vr#\u000bⒺY\\t<\/3﬌R訤='﹠8蝤Ꞵ렴曔r": false } ]}, "阨{c?C\u001d~K?鎌Ԭ8烫#뙣P초遗t㭱E­돒䆺}甗[R*1!\\~h㕅᰺@<9JꏏષI䳖栭6綘걹ᅩM\"▯是∔v鬽顭⋊譬": "운ﶁK敂(欖C취پ℄爦賾" } }} }], "鷨赼鸙+\\䭣t圙ڹx᜾ČN<\/踘\"S_맶a鷺漇T彚⎲i㈥LT-xA캔$\u001cUH=a0츺l릦": "溣㣂0濕=鉵氬駘>Pꌢpb솇쬤h힊줎獪㪬CrQ矠a&脍꼬爼M茴/΅\u0017弝轼y#Ꞡc6둴=?R崏뷠麖w?" }, "閕ᘜ]CT)䵞l9z'xZF{:ؐI/躅匽졁:䟇AGF૸\u001cퟗ9)駬慟ꡒꆒRS״툋A<>\u0010\"ꂔ炃7g덚E৏bꅰ輤]o㱏_뷕ܘ暂\"u": "芢+U^+㢩^鱆8*1鈶鮀\u0002뺰9⬳ꪮlL䃣괟,G8\u20a8DF㉪錖0ㄤ瓶8Nଷd?眡GLc陓\\_죌V쁰ल二?c띦捱 \u0019JC\u0011b⤉zẒT볕\"绣蘨뚋cꡉkI\u001e鳴", "ꃣI'{6u^㡃#཰Kq4逹y൒䧠䵮!㱙/n??{L풓ZET㙠퍿X2᩟綳跠葿㚙w཮x캽扳B唕S|尾}촕%N?o䪨": null, "ⰴFjෟ셈[\u0018辷px?椯\\1<ﲻ栘ᣁ봢憠뉴p": -5263694954586507640 } ] ]] ]} ]}] ] ], "?#癘82禩鋆ꊝty?&": -1.9419029518535086E-19 } ] ] ]} ] ] ], "훊榲.|῕戄&.㚏Zꛦ2\"䢥ሆ⤢fV_摕婔?≍Fji冀탆꜕i㏬_ẑKᅢ꫄蔻XWc|饡Siẘ^㲦?羡2ぴ1縁ᙅ?쐉Ou": false }]] ]}}}, "慂뗄卓蓔ᐓ匐嚖/颹蘯/翻ㆼL?뇊,텵<\\獷ごCボ": null }, "p溉ᑟi짣z:䒤棇r^٫%G9缑r砌롧.물农g?0׼ሩ4ƸO㣥㯄쩞ጩ": null, "껎繥YxK\"F젷쨹뤤1wq轫o?鱑뜀瘊?뎃h灑\\ꛣ}K峐^ኖ⤐林ꉓhy": null } ], "᱀n肓ㄛ\"堻2>m殮'1橌%Ꞵ군=Ӳ鯨9耛<\/n據0u彘8㬇៩f᏿诙]嚊": "䋯쪦S럶匏ㅛ#)O`ሀX_鐪渲⛀㨻宅闩➈ꢙஶDR⪍" }, "tA썓龇 ⋥bj왎录r땽✒롰;羋^\\?툳*┎?썀ma䵳넅U䳆૘〹䆀LQ0\b疀U~u$M}(鵸g⳾i抦뛹?䤈땚검.鹆?ꩡtⶥGĒ;!ቹHS峻B츪켏f5≺": 2366175040075384032, "전pJjleb]ួ": -7.5418493141528422E18, "n.鎖ጲ\n?,$䪘": true }, "欈Ar㉣螵᪚茩?O)": null }, "쫸M#x}D秱欐K=侫们丐.KꕾxẠ\u001e㿯䣛F܍캗qq8꟞ṢFD훎⵳簕꭛^鳜\u205c٫~⑟~冫ऊ2쫰<\/戲윱o<\"": true }, "㷝聥/T뱂\u0010锕|内䞇x侁≦㭖:M?iM᣿IJe煜dG࣯尃⚩gPt*辂.{磼럾䝪@a\\袛?}ᓺB珼": true } } ]]}]}}, 
"tn\"6ꫤ샾䄄;銞^%VBPwu묪`Y僑N.↺Ws?3C⤻9唩S䠮ᐴm;sᇷ냞඘B/;툥B?lB∤)G+O9m裢0kC햪䪤": -4.5941249382502277E18, "ᚔt'\\愫?鵀@\\びꂕP큠<<]煹G-b!S?\nꖽ鼫,ݛ&頺y踦?E揆릱H}햧캡b@手.p탻>췽㣬ꒅ`qe佭P>ᓂ&?u}毚ᜉ蟶頳졪ᎏzl2wO": -2.53561440423275936E17 }]} } ] ]], "潈촒⿂叡": 5495738871964062986 } ]] } ] ]} ]] ]] ]} ] ]}, "ႁq킍蓅R`謈蟐ᦏ儂槐僻ﹶ9婌櫞釈~\"%匹躾ɢ뤥>࢟瀴愅?殕节/냔O✬H鲽엢?ᮈੁ⋧d␽㫐zCe*": 2.15062231586689536E17, "㶵Ui曚珰鋪ᾼ臧P{䍏䷪쨑̟A뼿T渠誈䏚D1!잶<\/㡍7?)2l≣穷᛾稝{:;㡹nemיּ訊`G": null, "䀕\"飕辭p圁f#뫆䶷뛮;⛴ᩍ3灚덏ᰝ쎓⦷詵%᜖Մfs⇫(\u001e~P|ﭗCⲾផv湟W첋(텪બT<บSꏉ੗⋲X婵i ӵ⇮?L䬇|ꈏ?졸": 1.548341247351782E-19 } ] }, "t;:N\u0015q鐦Rt缆{ꮐC?஛㷱敪\\+鲊㉫㓪몗릙竏(氵kYS": "XᰂT?൮ô", "碕飦幑|+ 㚦鏶`镥ꁩ B<\/加륙": -4314053432419755959, "秌孳(p!G?V傫%8ሽ8w;5鲗㦙LI檸\u2098": "zG N볞䆭鎍흘\\ONK3횙<\/樚立圌Q튅k쩎Ff쁋aׂJK銆ઘ즐狩6༥✙䩜篥CzP(聻駇HHퟲ讃%,ά{렍p而刲vy䦅ክ^톺M楒鍢㹳]Mdg2>䤉洞", "踛M젧>忔芿㌜Zk": 2215369545966507819, "씐A`$槭頰퍻^U覒\bG毲aᣴU;8!팲f꜇E⸃_卵{嫏羃X쀳C7뗮m(嚼u N܁谟D劯9]#": true, "ﻩ!뵸-筚P᭛}ἰ履lPh?౮ⶹꆛ穉뎃g萑㑓溢CX뾇G㖬A錟]RKaꄘ]Yo+@䘁's섎襠$^홰}F": null }, "粘ꪒ4HXᕘ蹵.$區\r\u001d묁77pPc^y笲Q<\/ꖶ 訍䃍ᨕG?*": 1.73773035935040224E17 }, "婅拳?bkU;#D矠❴vVN쩆t㜷A풃갮娪a%鮏絪3dAv룒#tm쑬⌛qYwc4|L8KZ;xU⓭㳔밆拓EZ7襨eD|隰ऌ䧼u9Ԣ+]贴P荿": 2.9628516456987075E18 }]}}] ]} }} ]}] ], "|g翉F*湹̶\u0005⏐1脉̀eI쩓ᖂ㫱0碞l䴨ꑅ㵽7AtἈ턧yq䳥塑:z:遀ᄐX눔擉)`N3昛oQ셖y-ڨ⾶恢ꈵq^<\/": null, "菹\\랓G^璬x৴뭸ゆUS겧﮷Bꮤ ┉銜᯻0%N7}~f洋坄Xꔼ<\/4妟Vꄟ9:౟곡t킅冩䧉笭裟炂4봋ⱳ叺怊t+怯涗\"0㖈Hq": false, "졬믟'ﺇফ圪쓬멤m邸QLব䗁愍4jvs翙 ྍ꧀艳H-|": null, "컮襱⣱뗠 R毪/鹙꾀%헳8&": -5770986448525107020 } ], "B䔚bꐻ뙏姓展槰T-똌鷺tc灿᫽^㓟䏀o3o$꘭趙萬I顩)뇭Ἑ䓝\f@{ᣨ`x3蔛": null } ] ] }], "⦖扚vWꃱ꥙㾠壢輓{-⎳鹷贏璿䜑bG倛⋐磎c皇皩7a~ﳫU╣Q࠭ꎉS摅姽OW.홌ೞ.": null, "蚪eVlH献r}ᮏ믠ﰩꔄ@瑄ⲱ": null, "퀭$JWoꩢg역쁍䖔㑺h&ୢtXX愰㱇?㾫I_6 OaB瑈q裿": null, "꽦ﲼLyr纛Zdu珍B絟쬴糔?㕂짹䏵e": "ḱ\u2009cX9멀i䶛簆㳀k" } ]]]], "(_ꏮg່澮?ᩑyM<艷\u001aꪽ\\庼뙭Z맷㰩Vm\\lY筺]3㋲2㌩㄀Eਟ䝵⨄쐨ᔟgङHn鐖⤇놋瓇Q탚單oY\"♆臾jHᶈ征ቄ??uㇰA?#1侓": null }, "觓^~ሢ&iI띆g륎ḱ캀.ᓡꀮ胙鈉": 1.0664523593012836E-19, "y詭Gbᔶऽs댁U:杜⤎ϲ쁗⮼D醄诿q뙰I#즧v蔎xHᵿt᡽[**?崮耖p缫쿃L菝,봬ꤦC쯵#=X1瞻@OZc鱗CQTx": null } ] }}], "剘紁\u0004\\Xn⊠6,တױ;嵣崇}讃iႽ)d1\\䔓": null }, "脨z\"{X,1u찜<'k&@?1}Yn$\u0015Rd輲ーa쮂굄+B$l": true, "諳>*쭮괐䵟Ґ+<箁}빀䅱⡔檏臒hIH脟ꩪC핝ଗP좕\"0i<\/C褻D۞恗+^5?'ꂱ䚫^7}㡠cq6\\쨪ꔞꥢ?纖䫀氮蒫侲빦敶q{A煲G": -6880961710038544266 }}] }, "5s⨲JvಽῶꭂᄢI.a৊": null, 
"?1q꽏쿻ꛋDR%U娝>DgN乭G": -1.2105047302732358E-19 } ] ]}, "qZz`撋뙹둣j碇쁏\\ꆥ\u0018@藴疰Wz)O{F䶛l᷂绘訥$]뮍夻䢋䩇萿獰樧猵⣭j萶q)$꬚⵷0馢W:Ⱍ!Qoe": -1666634370862219540, "t": "=wp|~碎Q鬳Ӎ\\l-<\/^ﳊhn퐖}䍔t碵ḛ혷?靻䊗", "邙쇡㯇%#=,E4勃驆V繚q[Y댻XV㡸[逹ᰏ葢B@u=JS5?bLRn얮㍉⏅ﰳ?a6[&큟!藈": 1.2722786745736667E-19 }, "X블땨4{ph鵋ꉯ웸 5p簂䦭s_E徔濧d稝~No穔噕뽲)뉈c5M윅>⚋[岦䲟懷恁?鎐꓆ฬ爋獠䜔s{\u001bm鐚儸煛%bﯿXT>ꗘ@8G": 1157841540507770724, "媤娪Q杸\u0011SAyᡈ쿯": true, "灚^ಸ%걁<\/蛯?\"祴坓\\\\'흍": -3.4614808555942579E18, "釴U:O湛㴑䀣렑縓\ta)(j:숾却䗌gCiB뽬Oyuq輥厁/7)?今hY︺Q": null } ] ]]]}] ], "I笔趠Ph!<ཛྷ㸞诘X$畉F\u0005笷菟.Esr릙!W☆䲖뗷莾뒭U\"䀸犜Uo3Gꯌx4r蔇᡹㧪쨢準<䂀%ࡡꟼ瑍8炝Xs0䀝销?fi쥱ꆝલBB": -8571484181158525797, "L⦁o#J|\"⽩-㱢d㌛8d\\㶤傩儻E[Y熯)r噤὘勇 }": "e(濨쓌K䧚僒㘍蠤Vᛸ\"络QJL2,嬓왍伢㋒䴿考澰@(㏾`kX$끑эE斡,蜍&~y", "vj.|统圪ᵮPL?2oŶ`밧\"勃+0ue%⿥绬췈체$6:qa렐Q;~晘3㙘鹑": true, "ශؙ4獄⶿c︋i⚅:ん閝Ⳙ苆籦kw{䙞셕pC췃ꍬ␜꟯ꚓ酄b힝hwk꭭M鬋8B耳쑘WQ\\偙ac'唀x᪌\u2048*h짎#ፇ鮠뾏ឿ뀌": false, "⎀jꄒ牺3Ⓝ컴~?親ꕽぼܓ喏瘘!@<튋㐌꿱⩦{a?Yv%⪧笯Uܱ栅E搚i뚬:ꄃx7䙳ꦋ&䓹vq☶I䁘ᾘ涜\\썉뺌Lr%Bc㍜3?ꝭ砿裞]": null, "⭤뙓z(㡂%亳K䌽꫿AԾ岺㦦㼴輞낚Vꦴw냟鬓㹈뽈+o3譻K1잞": 2091209026076965894, "ㇲ\t⋇轑ꠤ룫X긒\"zoY읇희wj梐쐑l侸`e%s": -9.9240075473576563E17, "啸ꮑ㉰!ᚓ}銏": -4.0694813896301194E18, ">]囋੽EK뇜>_ꀣ緳碖{쐐裔[<ನ\"䇅\"5L?#xTwv#罐\u0005래t应\\N?빗;": "v쮽瞭p뭃" } ]], "斴槾?Z翁\"~慍弞ﻆ=꜡o5鐋dw\"?K蠡i샾ogDﲰ_C*⬟iㇷ4nય蟏[㟉U꽌娛苸 ঢ়操贻洞펻)쿗૊許X⨪VY츚Z䍾㶭~튃ᵦ<\/E臭tve猑x嚢": null, "锡⛩<\/칥ꈙᬙ蝀&Ꚑ籬■865?_>L詏쿨䈌浿弥爫̫lj&zx<\/C쉾?覯n?": null, "꾳鑤/꼩d=ᘈn挫ᑩ䰬ZC": "3錢爋6Ƹ䴗v⪿Wr益G韠[\u0010屗9쁡钁u?殢c䳀蓃樄욂NAq赟c튒瘁렶Aૡɚ捍" } ] ] ]} ] ] }]]]}} ]}], "Ej䗳U<\/Q=灒샎䞦,堰頠@褙g_\u0003ꤾfⶽ?퇋!łB〙ד3CC䌴鈌U:뭔咎(Qો臃䡬荋BO7㢝䟸\"Yb": 2.36010731779814E-20, "逸'0岔j\u000e눘먷翌C츊秦=ꭣ棭ှ;鳸=麱$XP⩉駚橄A\\좱⛌jqv䰞3Ь踌v㳆¹gT┌gvLB賖烡m?@E঳i": null }, "曺v찘ׁ?&绫O័": 9107241066550187880 } ] ], "(e屄\u0019昜훕琖b蓘ᬄ0/۲묇Z蘮ဏ⨏蛘胯뢃@㘉8ሪWᨮ⦬ᅳ䅴HI၇쨳z囕陻엣1赳o": true, ",b刈Z,ၠ晐T솝ŕB⩆ou'퐼≃绗雗d譊": null, "a唥KB\"ﳝ肕$u\n^⅄P䟼냉䞸⩪u윗瀱ꔨ#yşs꒬=1|ﲤ爢`t౐튼쳫_Az(Ṋ擬㦷좕耈6": 2099309172767331582, "?㴸U<\/䢔ꯡ阽扆㐤q鐋?f㔫wM嬙-;UV죫嚔픞G&\"Cᗍ䪏풊Q": "VM7疹+陕枡툩窲}翡䖶8欞čsT뮐}璤:jﺋ鎴}HfA൝⧻Zd#Qu茅J髒皣Y-︴[?-~쉜v딏璮㹚䅊﩯<-#\u000e걀h\u0004u抱﵊㼃U<㱷⊱IC進" }, "숌dee節鏽邺p넱蹓+e罕U": true } ], "b⧴룏??ᔠ3ぱ>%郿劃翐ꏬꠛW瞳᫏누躨狀ໄy੽\"ីuS=㨞馸k乆E": 
"トz݈^9R䬑<ﮛGRꨳ\u000fTT泠纷꽀MRᴱ纊:㠭볮?%N56%鈕1䗍䜁a䲗j陇=뿻偂衋࿘ᓸ?ᕵZ+<\/}H耢b䀁z^f$&㝒LkꢳI脚뙛u": 5.694374481577558E-20 }] } ]], "obj": {"key": "wrong value"}, "퓲꽪m{㶩/뇿#⼢&᭙硞㪔E嚉c樱㬇1a綑᝖DḾ䝩": null } } ================================================ FILE: C/thirdparty/rapidjson/bin/data/webapp.json ================================================ {"web-app": { "servlet": [ { "servlet-name": "cofaxCDS", "servlet-class": "org.cofax.cds.CDSServlet", "init-param": { "configGlossary:installationAt": "Philadelphia, PA", "configGlossary:adminEmail": "ksm@pobox.com", "configGlossary:poweredBy": "Cofax", "configGlossary:poweredByIcon": "/images/cofax.gif", "configGlossary:staticPath": "/content/static", "templateProcessorClass": "org.cofax.WysiwygTemplate", "templateLoaderClass": "org.cofax.FilesTemplateLoader", "templatePath": "templates", "templateOverridePath": "", "defaultListTemplate": "listTemplate.htm", "defaultFileTemplate": "articleTemplate.htm", "useJSP": false, "jspListTemplate": "listTemplate.jsp", "jspFileTemplate": "articleTemplate.jsp", "cachePackageTagsTrack": 200, "cachePackageTagsStore": 200, "cachePackageTagsRefresh": 60, "cacheTemplatesTrack": 100, "cacheTemplatesStore": 50, "cacheTemplatesRefresh": 15, "cachePagesTrack": 200, "cachePagesStore": 100, "cachePagesRefresh": 10, "cachePagesDirtyRead": 10, "searchEngineListTemplate": "forSearchEnginesList.htm", "searchEngineFileTemplate": "forSearchEngines.htm", "searchEngineRobotsDb": "WEB-INF/robots.db", "useDataStore": true, "dataStoreClass": "org.cofax.SqlDataStore", "redirectionClass": "org.cofax.SqlRedirection", "dataStoreName": "cofax", "dataStoreDriver": "com.microsoft.jdbc.sqlserver.SQLServerDriver", "dataStoreUrl": "jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon", "dataStoreUser": "sa", "dataStorePassword": "dataStoreTestQuery", "dataStoreTestQuery": "SET NOCOUNT ON;select test='test';", "dataStoreLogFile": "/usr/local/tomcat/logs/datastore.log", "dataStoreInitConns": 10, "dataStoreMaxConns": 100, 
"dataStoreConnUsageLimit": 100, "dataStoreLogLevel": "debug", "maxUrlLength": 500}}, { "servlet-name": "cofaxEmail", "servlet-class": "org.cofax.cds.EmailServlet", "init-param": { "mailHost": "mail1", "mailHostOverride": "mail2"}}, { "servlet-name": "cofaxAdmin", "servlet-class": "org.cofax.cds.AdminServlet"}, { "servlet-name": "fileServlet", "servlet-class": "org.cofax.cds.FileServlet"}, { "servlet-name": "cofaxTools", "servlet-class": "org.cofax.cms.CofaxToolsServlet", "init-param": { "templatePath": "toolstemplates/", "log": 1, "logLocation": "/usr/local/tomcat/logs/CofaxTools.log", "logMaxSize": "", "dataLog": 1, "dataLogLocation": "/usr/local/tomcat/logs/dataLog.log", "dataLogMaxSize": "", "removePageCache": "/content/admin/remove?cache=pages&id=", "removeTemplateCache": "/content/admin/remove?cache=templates&id=", "fileTransferFolder": "/usr/local/tomcat/webapps/content/fileTransferFolder", "lookInContext": 1, "adminGroupID": 4, "betaServer": true}}], "servlet-mapping": { "cofaxCDS": "/", "cofaxEmail": "/cofaxutil/aemail/*", "cofaxAdmin": "/admin/*", "fileServlet": "/static/*", "cofaxTools": "/tools/*"}, "taglib": { "taglib-uri": "cofax.tld", "taglib-location": "/WEB-INF/tlds/cofax.tld"}}} ================================================ FILE: C/thirdparty/rapidjson/bin/data/widget.json ================================================ {"widget": { "debug": "on", "window": { "title": "Sample Konfabulator Widget", "name": "main_window", "width": 500, "height": 500 }, "image": { "src": "Images/Sun.png", "name": "sun1", "hOffset": 250, "vOffset": 250, "alignment": "center" }, "text": { "data": "Click Here", "size": 36, "style": "bold", "name": "text1", "hOffset": 250, "vOffset": 100, "alignment": "center", "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" } }} ================================================ FILE: C/thirdparty/rapidjson/bin/encodings/utf8.json ================================================ { "en":"I can eat glass and it doesn't hurt 
me.", "zh-Hant":"我能吞下玻璃而不傷身體。", "zh-Hans":"我能吞下玻璃而不伤身体。", "ja":"私はガラスを食べられます。それは私を傷つけません。", "ko":"나는 유리를 먹을 수 있어요. 그래도 아프지 않아요" } ================================================ FILE: C/thirdparty/rapidjson/bin/encodings/utf8bom.json ================================================ { "en":"I can eat glass and it doesn't hurt me.", "zh-Hant":"我能吞下玻璃而不傷身體。", "zh-Hans":"我能吞下玻璃而不伤身体。", "ja":"私はガラスを食べられます。それは私を傷つけません。", "ko":"나는 유리를 먹을 수 있어요. 그래도 아프지 않아요" } ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail1.json ================================================ "A JSON payload should be an object or array, not a string." ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail10.json ================================================ {"Extra value after close": true} "misplaced quoted value" ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail11.json ================================================ {"Illegal expression": 1 + 2} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail12.json ================================================ {"Illegal invocation": alert()} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail13.json ================================================ {"Numbers cannot have leading zeroes": 013} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail14.json ================================================ {"Numbers cannot be hex": 0x14} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail15.json ================================================ ["Illegal backslash escape: \x15"] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail16.json ================================================ [\naked] 
================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail17.json ================================================ ["Illegal backslash escape: \017"] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail18.json ================================================ [[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail19.json ================================================ {"Missing colon" null} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail2.json ================================================ ["Unclosed array" ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail20.json ================================================ {"Double colon":: null} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail21.json ================================================ {"Comma instead of colon", null} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail22.json ================================================ ["Colon instead of comma": false] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail23.json ================================================ ["Bad value", truth] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail24.json ================================================ ['single quote'] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail25.json ================================================ [" tab character in string "] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail26.json ================================================ ["tab\ 
character\ in\ string\ "] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail27.json ================================================ ["line break"] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail28.json ================================================ ["line\ break"] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail29.json ================================================ [0e] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail3.json ================================================ {unquoted_key: "keys must be quoted"} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail30.json ================================================ [0e+] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail31.json ================================================ [0e+-1] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail32.json ================================================ {"Comma instead if closing brace": true, ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail33.json ================================================ ["mismatch"} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail4.json ================================================ ["extra comma",] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail5.json ================================================ ["double extra comma",,] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail6.json ================================================ [ , "<-- missing value"] ================================================ FILE: 
C/thirdparty/rapidjson/bin/jsonchecker/fail7.json ================================================ ["Comma after the close"], ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail8.json ================================================ ["Extra close"]] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/fail9.json ================================================ {"Extra comma": true,} ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/pass1.json ================================================ [ "JSON Test Pattern pass1", {"object with 1 member":["array with 1 element"]}, {}, [], -42, true, false, null, { "integer": 1234567890, "real": -9876.543210, "e": 0.123456789e-12, "E": 1.234567890E+34, "": 23456789012E66, "zero": 0, "one": 1, "space": " ", "quote": "\"", "backslash": "\\", "controls": "\b\f\n\r\t", "slash": "/ & \/", "alpha": "abcdefghijklmnopqrstuvwyz", "ALPHA": "ABCDEFGHIJKLMNOPQRSTUVWYZ", "digit": "0123456789", "0123456789": "digit", "special": "`1~!@#$%^&*()_+-={':[,]}|;.?", "hex": "\u0123\u4567\u89AB\uCDEF\uabcd\uef4A", "true": true, "false": false, "null": null, "array":[ ], "object":{ }, "address": "50 St. James Street", "url": "http://www.JSON.org/", "comment": "// /* */": " ", " s p a c e d " :[1,2 , 3 , 4 , 5 , 6 ,7 ],"compact":[1,2,3,4,5,6,7], "jsontext": "{\"object with 1 member\":[\"array with 1 element\"]}", "quotes": "" \u0022 %22 0x22 034 "", "\/\\\"\uCAFE\uBABE\uAB98\uFCDE\ubcda\uef4A\b\f\n\r\t`1~!@#$%^&*()_+-=[]{}|;:',./<>?" 
: "A key can be any string" }, 0.5 ,98.6 , 99.44 , 1066, 1e1, 0.1e1, 1e-1, 1e00,2e+00,2e-00 ,"rosebud"] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/pass2.json ================================================ [[[[[[[[[[[[[[[[[[["Not too deep"]]]]]]]]]]]]]]]]]]] ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/pass3.json ================================================ { "JSON Test Pattern pass3": { "The outermost value": "must be an object or array.", "In this test": "It is an object." } } ================================================ FILE: C/thirdparty/rapidjson/bin/jsonchecker/readme.txt ================================================ Test suite from http://json.org/JSON_checker/. If the JSON_checker is working correctly, it must accept all of the pass*.json files and reject all of the fail*.json files. ================================================ FILE: C/thirdparty/rapidjson/bin/types/booleans.json ================================================ [ true, true, false, false, true, true, true, false, false, true, false, false, true, false, false, false, true, false, false, true, true, false, true, true, true, false, false, false, true, false, true, false, false, true, true, true, true, true, true, false, false, true, false, false, false, true, true, false, true, true, false, true, false, true, true, true, false, false, false, true, false, false, false, true, true, false, true, true, true, true, true, true, true, true, false, false, false, false, false, true, true, true, true, true, true, true, false, false, false, true, false, false, false, true, true, true, false, false, true, false ] ================================================ FILE: C/thirdparty/rapidjson/bin/types/floats.json ================================================ [ 135.747111636, 123.377054008, 140.527504552, -72.299143906, -23.851678949, 73.586193519, -158.299382442, 177.477876032, 
32.268518982, -139.560009969, 115.203105183, -106.025823607, 167.224138231, 103.378383732, -97.498486285, 18.184723416, 69.137075711, 33.849002681, -120.185228215, -20.841408615, -172.659492727, -2.691464061, 22.426164066, -98.416909437, -31.603082708, -85.072296561, 108.620987395, -43.127078238, -126.473562057, -158.595489097, -57.890678254, -13.254016573, -85.024504709, 171.663552644, -146.495558248, -10.606748276, -118.786969354, 153.352057804, -45.215545083, 37.038725288, 106.344071897, -64.607402031, 85.148030911, 28.897784566, 39.51082061, 20.450382102, -113.174943618, 71.60785784, -168.202648062, -157.338200017, 10.879588527, -114.261694831, -5.622927072, -173.330830616, -29.47002003, -39.829034201, 50.031545162, 82.815735508, -119.188760828, -48.455928081, 163.964263034, 46.30378861, -26.248889762, -47.354615322, 155.388677633, -166.710356904, 42.987233558, 144.275297374, 37.394383186, -122.550388725, 177.469945914, 101.104677413, 109.429869885, -104.919625624, 147.522756541, -81.294703727, 122.744731363, 81.803603684, 26.321556167, 147.045441354, 147.256895816, -174.211095908, 52.518769316, -78.58250334, -173.356685435, -107.728209264, -69.982325771, -113.776095893, -35.785267074, -105.748545976, -30.206523864, -76.185311723, -126.400112781, -26.864958639, 56.840053629, 93.781553535, -116.002949803, -46.617140948, 176.846840093, -144.24821335 ] ================================================ FILE: C/thirdparty/rapidjson/bin/types/guids.json ================================================ [ "d35bf0d4-8d8f-4e17-a5c3-ad9bfd675266", "db402774-eeb6-463b-9986-c458c44d8b5a", "2a2e4101-b5f2-40b8-8750-e03f01661e60", "76787cfa-f4eb-4d62-aaad-e1d588d00ad5", "fd73894b-b500-4a7c-888c-06b5bd9cec65", "cce1862a-cf31-4ef2-9e23-f1d23b4e6163", "00a98bb0-2b6e-4368-8512-71c21aa87db7", "ab9a8d69-cec7-4550-bd35-3ed678e22782", "f18b48e1-5114-4fbe-9652-579e8d66950e", "4efe3baa-7ac5-4d6a-a839-6b9cfe825764", "b4aec119-5b0a-434c-b388-109816c482a5", 
"e0ef0cbb-127a-4a28-9831-5741b4295275", "d50286a5-cb7b-4c9e-be99-f214439bae8c", "a981094c-f1ac-42ed-a9fa-86404c7210ff", "2a34ee57-5815-4829-b77b-eeebaa8fe340", "a0530d44-48f8-4eff-b9ea-8810c4308351", "c6f91509-83e1-4ea1-9680-e667fbfd56ee", "cab11402-dcdd-4454-b190-6da124947395", "283d159c-2b18-4856-b4c7-5059252eaa15", "146157c6-72a8-4051-9991-cb6ea6743d81", "aef6f269-7306-4bd2-83f7-6d5605b5dc9a", "37fe6027-d638-4017-80a9-e7b0567b278e", "5003d731-33fb-4159-af61-d76348a44079", "e0e06979-5f80-4713-9fe0-8a4d60dc89f8", "7e85bdc3-0345-4cb6-9398-ccab06e79976", "f2ebf5af-6568-4ffe-a46d-403863fd4b66", "e0b5bb1c-b4dd-4535-9a9e-3c73f1167d46", "c852d20b-6bcb-4b12-bd57-308296c64c5a", "7ac3ae82-1818-49cd-a8a4-5ac77dfafd46", "138004a9-76e2-4ad7-bd42-e74dabdbb803", "ab25b5be-96be-45b0-b765-947b40ec36a6", "08404734-fd57-499e-a4cf-71e9ec782ede", "8dfdeb16-248b-4a21-bf89-2e22b11a4101", "a0e44ef0-3b09-41e8-ad5d-ed8e6a1a2a67", "a7981e49-188d-414a-9779-b1ad91e599d1", "329186c0-bf27-4208-baf7-c0a0a5a2d5b7", "cb5f3381-d33e-4b30-b1a9-f482623cad33", "15031262-ca73-4e3c-bd0a-fcf89bdf0caf", "6d7333d1-2e8c-4d78-bfde-5be47e70eb13", "acaa160c-670a-4e8f-ac45-49416e77d5f9", "228f87eb-cde4-4106-808b-2dbf3c7b6d2e", "2ff830a3-5445-4d8e-b161-bddd30666697", "f488bedd-ff6e-4108-b9a7-07f6da62f476", "2e12b846-0a34-478e-adf7-a438493803e6", "6686b8ef-7446-4d86-bd8c-df24119e3bfe", "e474a5c5-5793-4d41-b4ab-5423acc56ef1", "ac046573-e718-44dc-a0dc-9037eeaba6a9", "6b0e9099-cf53-4d5a-8a71-977528628fcf", "d51a3f22-0ff9-4087-ba9b-fcee2a2d8ade", "bdc01286-3511-4d22-bfb8-76d01203d366", "ca44eb84-17ff-4f27-8f1e-1bd25f4e8725", "4e9a8c2f-be0b-4913-92d2-c801b9a50d04", "7685d231-dadd-4041-9165-898397438ab7", "86f0bf26-d66a-44d8-99f5-d6768addae3b", "2ca1167c-72ba-45a0-aa42-faf033db0d0b", "199a1182-ea55-49ff-ba51-71c29cdd0aac", "be6a4dd2-c821-4aa0-8b83-d64d6644b5b2", "4c5f4781-7f80-4daa-9c20-76b183000514", "513b31bd-54fb-4d12-a427-42a7c13ff8e1", "8e211bcb-d76c-4012-83ad-74dd7d23b687", "44d5807e-0501-4f66-8779-e244d4fdca0a", 
"db8cd555-0563-4b7b-b00c-eada300a7065", "cb14d0c9-46cc-4797-bd3a-752b05629f07", "4f68b3ef-ac9b-47a0-b6d7-57f398a5c6a5", "77221aae-1bcf-471c-be45-7f31f733f9d6", "42a7cac8-9e80-4c45-8c71-511d863c98ea", "f9018d22-b82c-468c-bdb5-8864d5964801", "75f4e9b8-62a2-4f21-ad8a-e19eff0419bc", "9b7385c8-8653-4184-951c-b0ac1b36b42e", "571018aa-ffbf-4b42-a16d-07b57a7f5f0e", "35de4a2f-6bf1-45aa-b820-2a27ea833e44", "0b8edb20-3bb4-4cb4-b089-31957466dbab", "97da4778-9a7b-4140-a545-968148c81fb7", "969f326c-8f2a-47c5-b41c-d9c2f06c9b9d", "ae211037-8b53-4b17-bfc8-c06fc7774409", "12c5c3c4-0bd5-45d3-bc1d-d04a3c65d3e6", "ec02024f-ce43-4dd3-8169-a59f7baee043", "5b6afe77-ce48-47ca-90a0-25cd10ca5ffd", "2e3a61d4-6b8f-4d2f-ba86-878b4012efd8", "19a88a67-a5d3-4647-898f-1cde07bce040", "6db6f420-b5c8-48b9-bbb2-8864fe6fed65", "5a45dbde-7b53-4f6b-b864-e3b63be3708a", "c878321b-8a02-4239-9981-15760c2e7d15", "4e36687f-8bf6-4b12-b496-3a8e382d067e", "a59a63cd-43c0-4c6e-b208-6dbca86f8176", "303308c4-2e4a-45b5-8bf3-3e66e9ad05a1", "8b58fdf1-43a6-4c98-9547-6361b50791af", "a3563591-72ed-42b5-8e41-bac1d76d70cf", "38db8c78-3739-4f6e-8313-de4138082114", "86615bea-7e73-4daf-95da-ae6b9eee1bbb", "35d38e3e-076e-40dd-9aa8-05be2603bd59", "9f84c62d-b454-4ba3-8c19-a01878985cdc", "6721bbae-d765-4a06-8289-6fe46a1bf943", "0837796f-d0dd-4e50-9b7c-1983e6cc7c48", "021eb7d7-e869-49b9-80c3-9dd16ce2d981", "819c56f8-e040-475d-aad5-c6d5e98b20aa", "3a61ef02-735e-4229-937d-b3777a3f4e1f", "79dfab84-12e6-4ec8-bfc8-460ae71e4eca", "a106fabf-e149-476c-8053-b62388b6eb57", "9a3900a5-bfb4-4de0-baa5-253a8bd0b634" ] ================================================ FILE: C/thirdparty/rapidjson/bin/types/integers.json ================================================ [ 8125686, 8958709, 5976222, 1889524, 7968493, 1357486, 118415, 7081097, 4635968, 7555332, 2270233, 3428352, 8699968, 2087333, 7861337, 7554440, 2017031, 7981692, 6060687, 1877715, 3297474, 8373177, 6158629, 7853641, 3004441, 9650406, 2695251, 1180761, 4988426, 6043805, 8063373, 
6103218, 2848339, 8188690, 9235573, 5949816, 6116081, 6471138, 3354531, 4787414, 9660600, 942529, 7278535, 7967399, 554292, 1436493, 267319, 2606657, 7900601, 4276634, 7996757, 8544466, 7266469, 3301373, 4005350, 6437652, 7717672, 7126292, 8588394, 2127902, 7410190, 1517806, 4583602, 3123440, 7747613, 5029464, 9834390, 3087227, 4913822, 7550487, 4518144, 5862588, 1778599, 9493290, 5588455, 3638706, 7394293, 4294719, 3837830, 6381878, 7175866, 8575492, 1415229, 1453733, 6972404, 9782571, 4234063, 7117418, 7293130, 8057071, 9345285, 7626648, 3358911, 4574537, 9371826, 7627107, 6154093, 5392367, 5398105, 6956377 ] ================================================ FILE: C/thirdparty/rapidjson/bin/types/mixed.json ================================================ [ { "favoriteFruit": "banana", "greeting": "Hello, Kim! You have 10 unread messages.", "friends": [ { "name": "Higgins Rodriquez", "id": 0 }, { "name": "James Floyd", "id": 1 }, { "name": "Gay Stewart", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "pariatur", "ad", "eiusmod", "sit", "et", "velit", "culpa" ], "longitude": -57.919246, "latitude": -36.022812, "registered": "Friday, March 21, 2014 9:13 PM", "about": "Laborum nulla aliquip ullamco proident excepteur est officia ipsum. Eiusmod exercitation minim ex do labore reprehenderit aliqua minim qui excepteur reprehenderit cupidatat. Sint enim exercitation duis id consequat nisi enim magna. Commodo aliqua id ipsum sit magna enim. 
Veniam officia in labore fugiat veniam ea laboris ex veniam duis.\r\n", "address": "323 Pulaski Street, Ronco, North Carolina, 7701", "phone": "+1 (919) 438-2678", "email": "kim.griffith@cipromox.biz", "company": "CIPROMOX", "name": { "last": "Griffith", "first": "Kim" }, "eyeColor": "green", "age": 26, "picture": "http://placehold.it/32x32", "balance": "$1,283.55", "isActive": false, "guid": "10ab0392-c5e2-48a3-9473-aa725bad892d", "index": 0, "_id": "551b91198238a0bcf9a41133" }, { "favoriteFruit": "banana", "greeting": "Hello, Skinner! You have 9 unread messages.", "friends": [ { "name": "Rhonda Justice", "id": 0 }, { "name": "Audra Castaneda", "id": 1 }, { "name": "Vicky Chavez", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "dolore", "enim", "sit", "non", "exercitation", "fugiat", "adipisicing" ], "longitude": -60.291407, "latitude": -84.619318, "registered": "Friday, February 7, 2014 3:17 AM", "about": "Consectetur eiusmod laboris dolore est ullamco nulla in velit quis esse Lorem. Amet aliqua sunt aute occaecat veniam officia in duis proident aliqua cupidatat mollit. Sint eu qui anim duis ut anim duis eu cillum. Cillum nostrud adipisicing tempor Lorem commodo sit in ad qui non et irure qui. Labore eu aliquip id duis eiusmod veniam.\r\n", "address": "347 Autumn Avenue, Fidelis, Puerto Rico, 543", "phone": "+1 (889) 457-2319", "email": "skinner.maddox@moltonic.co.uk", "company": "MOLTONIC", "name": { "last": "Maddox", "first": "Skinner" }, "eyeColor": "green", "age": 22, "picture": "http://placehold.it/32x32", "balance": "$3,553.10", "isActive": false, "guid": "cfbc2fb6-2641-4388-b06d-ec0212cfac1e", "index": 1, "_id": "551b91197e0abe92d6642700" }, { "favoriteFruit": "strawberry", "greeting": "Hello, Reynolds! 
You have 5 unread messages.", "friends": [ { "name": "Brady Valdez", "id": 0 }, { "name": "Boyer Golden", "id": 1 }, { "name": "Gladys Knapp", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "commodo", "eiusmod", "cupidatat", "et", "occaecat", "proident", "Lorem" ], "longitude": 140.866287, "latitude": 1.401032, "registered": "Monday, October 20, 2014 8:01 AM", "about": "Deserunt elit consequat ea dolor pariatur aute consectetur et nulla ipsum ad. Laboris occaecat ipsum ad duis et esse ea ut voluptate. Ex magna consequat pariatur amet. Quis excepteur non mollit dolore cillum dolor ex esse veniam esse deserunt non occaecat veniam. Sit amet proident proident amet. Nisi est id ut ut adipisicing esse fugiat non dolor aute.\r\n", "address": "872 Montague Terrace, Haena, Montana, 3106", "phone": "+1 (974) 410-2655", "email": "reynolds.sanford@combot.biz", "company": "COMBOT", "name": { "last": "Sanford", "first": "Reynolds" }, "eyeColor": "green", "age": 21, "picture": "http://placehold.it/32x32", "balance": "$3,664.47", "isActive": true, "guid": "f9933a9c-c41a-412f-a18d-e727c569870b", "index": 2, "_id": "551b91197f170b65413a06e3" }, { "favoriteFruit": "banana", "greeting": "Hello, Neva! You have 7 unread messages.", "friends": [ { "name": "Clara Cotton", "id": 0 }, { "name": "Ray Gates", "id": 1 }, { "name": "Jacobs Reese", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "magna", "labore", "incididunt", "velit", "ea", "et", "eiusmod" ], "longitude": -133.058479, "latitude": 87.803677, "registered": "Friday, May 9, 2014 5:41 PM", "about": "Do duis occaecat ut officia occaecat officia nostrud reprehenderit ex excepteur aute anim in reprehenderit. Cupidatat nulla eiusmod nulla non minim veniam aute nulla deserunt adipisicing consectetur veniam. Sit consequat ex laboris aliqua labore consectetur tempor proident consequat est. Fugiat quis esse culpa aliquip. 
Excepteur laborum aliquip sunt eu cupidatat magna eiusmod amet nisi labore aliquip. Ut consectetur esse aliquip exercitation nulla ex occaecat elit do ex eiusmod deserunt. Ex eu voluptate minim deserunt fugiat minim est occaecat ad Lorem nisi.\r\n", "address": "480 Eagle Street, Fostoria, Oklahoma, 2614", "phone": "+1 (983) 439-3000", "email": "neva.barker@pushcart.us", "company": "PUSHCART", "name": { "last": "Barker", "first": "Neva" }, "eyeColor": "brown", "age": 36, "picture": "http://placehold.it/32x32", "balance": "$3,182.24", "isActive": true, "guid": "52489849-78e1-4b27-8b86-e3e5ab2b7dc8", "index": 3, "_id": "551b9119a13061c083c878d5" }, { "favoriteFruit": "banana", "greeting": "Hello, Rodgers! You have 6 unread messages.", "friends": [ { "name": "Marguerite Conway", "id": 0 }, { "name": "Margarita Cunningham", "id": 1 }, { "name": "Carmela Gallagher", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "ipsum", "magna", "amet", "elit", "sit", "occaecat", "elit" ], "longitude": -125.436981, "latitude": 19.868524, "registered": "Tuesday, July 8, 2014 8:09 PM", "about": "In cillum esse tempor do magna id ad excepteur ex nostrud mollit deserunt aliqua. Minim aliqua commodo commodo consectetur exercitation nulla nisi dolore aliqua in. Incididunt deserunt mollit nostrud excepteur. Ipsum fugiat anim deserunt Lorem aliquip nisi consequat eu minim in ex duis.\r\n", "address": "989 Varanda Place, Duryea, Palau, 3972", "phone": "+1 (968) 578-2974", "email": "rodgers.conner@frenex.net", "company": "FRENEX", "name": { "last": "Conner", "first": "Rodgers" }, "eyeColor": "blue", "age": 23, "picture": "http://placehold.it/32x32", "balance": "$1,665.17", "isActive": true, "guid": "ed3b2374-5afe-4fca-9325-8a7bbc9f81a0", "index": 4, "_id": "551b91197bcedb1b56a241ce" }, { "favoriteFruit": "strawberry", "greeting": "Hello, Mari! 
You have 10 unread messages.", "friends": [ { "name": "Irwin Boyd", "id": 0 }, { "name": "Dejesus Flores", "id": 1 }, { "name": "Lane Mcmahon", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "esse", "aliquip", "excepteur", "dolor", "ex", "commodo", "anim" ], "longitude": -17.038176, "latitude": 17.154663, "registered": "Sunday, April 6, 2014 4:46 AM", "about": "Excepteur veniam occaecat sint nulla magna in in officia elit. Eiusmod qui dolor fugiat tempor in minim esse officia minim consequat. Lorem ullamco labore proident ipsum id pariatur fugiat consectetur anim cupidatat qui proident non ipsum.\r\n", "address": "563 Hendrickson Street, Westwood, South Dakota, 4959", "phone": "+1 (980) 434-3976", "email": "mari.fleming@beadzza.org", "company": "BEADZZA", "name": { "last": "Fleming", "first": "Mari" }, "eyeColor": "blue", "age": 21, "picture": "http://placehold.it/32x32", "balance": "$1,948.04", "isActive": true, "guid": "6bd02166-3b1f-4ed8-84c9-ed96cbf12abc", "index": 5, "_id": "551b9119b359ff6d24846f77" }, { "favoriteFruit": "strawberry", "greeting": "Hello, Maxine! You have 7 unread messages.", "friends": [ { "name": "Sullivan Stark", "id": 0 }, { "name": "Underwood Mclaughlin", "id": 1 }, { "name": "Kristy Carlson", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "commodo", "ipsum", "quis", "non", "est", "mollit", "exercitation" ], "longitude": -105.40635, "latitude": 37.197993, "registered": "Tuesday, January 20, 2015 12:30 AM", "about": "Proident ullamco Lorem est consequat consectetur non eiusmod esse nostrud pariatur eiusmod enim exercitation eiusmod. Consequat duis elit elit minim ullamco et dolor eu minim do tempor esse consequat excepteur. Mollit dolor do voluptate nostrud quis anim cillum velit tempor eiusmod adipisicing tempor do culpa. Eu magna dolor sit amet nisi do laborum dolore nisi. 
Deserunt ipsum et deserunt non nisi.\r\n", "address": "252 Boulevard Court, Brenton, Tennessee, 9444", "phone": "+1 (950) 466-3377", "email": "maxine.moreno@zentia.tv", "company": "ZENTIA", "name": { "last": "Moreno", "first": "Maxine" }, "eyeColor": "brown", "age": 24, "picture": "http://placehold.it/32x32", "balance": "$1,200.24", "isActive": false, "guid": "ce307a37-ca1f-43f5-b637-dca2605712be", "index": 6, "_id": "551b91195a6164b2e35f6dc8" }, { "favoriteFruit": "strawberry", "greeting": "Hello, Helga! You have 5 unread messages.", "friends": [ { "name": "Alicia Vance", "id": 0 }, { "name": "Vinson Phelps", "id": 1 }, { "name": "Francisca Kelley", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "nostrud", "eiusmod", "dolore", "officia", "sint", "non", "qui" ], "longitude": -7.275151, "latitude": 75.54202, "registered": "Wednesday, October 1, 2014 6:35 PM", "about": "Quis duis ullamco velit qui. Consectetur non adipisicing id magna anim. Deserunt est officia qui esse. Et do pariatur incididunt anim ad mollit non. Et eiusmod sunt fugiat elit mollit ad excepteur anim nisi laboris eiusmod aliquip aliquip.\r\n", "address": "981 Bush Street, Beaulieu, Vermont, 3775", "phone": "+1 (956) 506-3807", "email": "helga.burch@synkgen.name", "company": "SYNKGEN", "name": { "last": "Burch", "first": "Helga" }, "eyeColor": "blue", "age": 22, "picture": "http://placehold.it/32x32", "balance": "$3,827.89", "isActive": false, "guid": "ff5dfea0-1052-4ef2-8b66-4dc1aad0a4fb", "index": 7, "_id": "551b911946be8358ae40e90e" }, { "favoriteFruit": "banana", "greeting": "Hello, Shaw! 
You have 5 unread messages.", "friends": [ { "name": "Christian Cardenas", "id": 0 }, { "name": "Cohen Pennington", "id": 1 }, { "name": "Mary Lindsay", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "occaecat", "ut", "occaecat", "magna", "exercitation", "incididunt", "irure" ], "longitude": -89.102972, "latitude": 89.489596, "registered": "Thursday, August 21, 2014 5:00 PM", "about": "Amet cupidatat quis velit aute Lorem consequat pariatur mollit deserunt et sint culpa excepteur duis. Enim proident duis qui ex tempor sunt nostrud occaecat. Officia sit veniam mollit eiusmod minim do aute eiusmod fugiat qui anim adipisicing in laboris. Do tempor reprehenderit sunt laborum esse irure dolor ad consectetur aute sit id ipsum. Commodo et voluptate anim consequat do. Minim laborum ad veniam ad minim incididunt excepteur excepteur aliqua.\r\n", "address": "237 Pierrepont Street, Herbster, New York, 3490", "phone": "+1 (976) 455-2880", "email": "shaw.zamora@shadease.me", "company": "SHADEASE", "name": { "last": "Zamora", "first": "Shaw" }, "eyeColor": "blue", "age": 38, "picture": "http://placehold.it/32x32", "balance": "$3,440.82", "isActive": false, "guid": "ac5fdb0e-e1fb-427e-881d-da461be0d1ca", "index": 8, "_id": "551b9119af0077bc28a2de25" }, { "favoriteFruit": "apple", "greeting": "Hello, Melissa! You have 5 unread messages.", "friends": [ { "name": "Marion Villarreal", "id": 0 }, { "name": "Kate Rose", "id": 1 }, { "name": "Hines Simon", "id": 2 } ], "range": [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ], "tags": [ "amet", "veniam", "mollit", "ad", "cupidatat", "deserunt", "Lorem" ], "longitude": -52.735052, "latitude": 16.258838, "registered": "Wednesday, April 16, 2014 7:56 PM", "about": "Aute ut culpa eiusmod tempor duis dolor tempor incididunt. Nisi non proident excepteur eiusmod incididunt nisi minim irure sit. 
In veniam commodo deserunt proident reprehenderit et consectetur ullamco quis nulla cupidatat.\r\n", "address": "642 Halsey Street, Blandburg, Kansas, 6761", "phone": "+1 (941) 539-3851", "email": "melissa.vaughn@memora.io", "company": "MEMORA", "name": { "last": "Vaughn", "first": "Melissa" }, "eyeColor": "brown", "age": 24, "picture": "http://placehold.it/32x32", "balance": "$2,399.44", "isActive": true, "guid": "1769f022-a7f1-4a69-bf4c-f5a5ebeab2d1", "index": 9, "_id": "551b9119b607c09c7ffc3b8a" } ] ================================================ FILE: C/thirdparty/rapidjson/bin/types/nulls.json ================================================ [ null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null ] ================================================ FILE: C/thirdparty/rapidjson/bin/types/paragraphs.json ================================================ [ "Commodo ullamco cupidatat nisi sit proident ex. Cillum pariatur occaecat in officia do commodo nisi cillum tempor minim. Ad dolor ut et aliquip fugiat eu officia cupidatat occaecat consectetur eiusmod veniam enim officia.\r\n", "Adipisicing cillum laborum nisi irure. Cillum dolor proident duis nulla qui mollit dolore reprehenderit mollit. Irure nulla dolor ipsum irure nulla quis laboris do.\r\n", "Est adipisicing consectetur incididunt in. Occaecat ea magna ex consequat irure sit laborum cillum officia magna sunt do exercitation aliquip. 
Laboris id aute in dolore reprehenderit voluptate non deserunt laborum.\r\n", "Consectetur eu aute est est occaecat adipisicing sint enim dolor eu. Tempor amet id non mollit eu consectetur cillum duis. Eu labore velit nulla ipsum commodo consequat aliquip. Cupidatat commodo dolore mollit enim sit excepteur nisi duis laboris deserunt esse.\r\n", "Incididunt ullamco est fugiat enim fugiat. Do sit mollit anim ad excepteur eu laboris exercitation officia labore nulla ut. Voluptate non voluptate cillum sit et voluptate anim duis velit consequat aliquip dolor. Elit et et esse laboris consectetur officia eiusmod aliquip nisi est. Qui labore dolore ad dolor.\r\n", "Anim adipisicing est irure proident sit officia ullamco voluptate sunt consectetur duis mollit excepteur veniam. Nostrud ut duis aute exercitation officia et quis elit commodo elit tempor aute aliquip enim. Est officia non cillum consequat voluptate ipsum sit voluptate nulla id.\r\n", "Ipsum enim consectetur aliquip nulla commodo ut ex aliqua elit duis do. Officia et sunt aliqua dolor minim voluptate veniam esse elit enim. Adipisicing reprehenderit duis ex magna non in fugiat sunt ipsum nostrud fugiat aliquip. Labore voluptate id officia voluptate eu. Magna do nostrud excepteur sunt aliqua adipisicing qui.\r\n", "Est occaecat non non cupidatat laborum qui. Veniam sit est voluptate labore sit irure consectetur fugiat. Anim enim enim fugiat exercitation anim ad proident esse in aliqua. Laboris ut aute culpa ullamco.\r\n", "Sit et aliquip cupidatat deserunt eiusmod sint aliquip occaecat nostrud aliqua elit commodo ut magna. Amet sit est deserunt id duis in officia pariatur cupidatat ex. Mollit duis est consequat nulla aute velit ipsum sit consectetur pariatur ut non ex ipsum. Tempor esse velit pariatur reprehenderit et nostrud commodo laborum mollit labore.\r\n", "Aliquip irure quis esse aliquip. Ex non deserunt culpa aliqua ad anim occaecat ad. Lorem consectetur mollit eu consectetur est non nisi non ipsum. 
Qui veniam ullamco officia est ut excepteur. Nulla elit dolore cupidatat aliqua enim Lorem elit consequat eiusmod non aliqua eu in. Pariatur in culpa labore sint ipsum consectetur occaecat ad ex ipsum laboris aliquip officia. Non officia eiusmod nisi officia id id laboris deserunt sunt enim magna mollit sit.\r\n", "Mollit velit laboris laborum nulla aliquip consequat Lorem non incididunt irure. Eu voluptate sint do consectetur tempor sit Lorem in. Laborum eiusmod nisi Lorem ipsum dolore do aute laborum occaecat aute sunt. Sit laborum in ea do ipsum officia irure cillum irure nisi laboris. Ad anim deserunt excepteur ea veniam eiusmod culpa velit veniam. Commodo incididunt ea Lorem eu enim esse nisi incididunt mollit.\r\n", "Velit proident sunt aute dolore reprehenderit culpa. Pariatur reprehenderit commodo ad ea voluptate anim nulla ipsum eu irure fugiat aliqua et. Adipisicing incididunt anim excepteur voluptate minim qui culpa. Sunt veniam enim reprehenderit magna magna. Sit ad amet deserunt ut aute dolore ad minim.\r\n", "Esse ullamco sunt mollit mollit. Eu enim dolore laboris cupidatat. Cupidatat adipisicing non aute exercitation fugiat. Non ut cillum labore fugiat aliquip ex duis quis consectetur ut nisi Lorem amet qui. Proident veniam amet qui reprehenderit duis qui. Nisi culpa sit occaecat ullamco occaecat laborum fugiat ut. Non duis deserunt culpa duis.\r\n", "Id ipsum eiusmod laboris non est ipsum deserunt labore duis reprehenderit deserunt. Sint tempor fugiat eiusmod nostrud in ut laborum esse in nostrud sit deserunt nostrud reprehenderit. Cupidatat aliqua qui anim consequat eu quis consequat consequat elit ipsum pariatur. Cupidatat in dolore velit quis. Exercitation cillum ullamco ex consectetur commodo tempor incididunt exercitation labore ad dolore. Minim incididunt consequat adipisicing esse eu eu voluptate.\r\n", "Anim sint eiusmod nisi anim do deserunt voluptate ut cillum eiusmod esse ex reprehenderit laborum. Dolore nulla excepteur duis excepteur. 
Magna nisi nostrud duis non commodo velit esse ipsum Lorem incididunt. Nulla enim consequat ad aliqua. Incididunt irure culpa nostrud ea aute ex sit non ad esse.\r\n", "Ullamco nostrud cupidatat adipisicing anim fugiat mollit eu. Et ut eu in nulla consequat. Sunt do pariatur culpa non est.\r\n", "Pariatur incididunt reprehenderit non qui excepteur cillum exercitation nisi occaecat ad. Lorem aliquip laborum commodo reprehenderit sint. Laboris qui ut veniam magna quis et et ullamco voluptate. Tempor reprehenderit deserunt consequat nisi. Esse duis sint in tempor. Amet aute cupidatat in sint et.\r\n", "Est officia nisi dolore consequat irure et excepteur. Sit qui elit tempor magna qui cillum anim amet proident exercitation proident. Eu cupidatat laborum consectetur duis ullamco irure nulla. Adipisicing culpa non reprehenderit anim aute.\r\n", "Eu est laborum culpa velit dolore non sunt. Tempor magna veniam ea sit non qui Lorem qui exercitation aliqua aliqua et excepteur eiusmod. Culpa aute anim proident culpa adipisicing duis tempor elit aliquip elit nulla laboris esse dolore. Sit adipisicing non dolor eiusmod occaecat cupidatat.\r\n", "Culpa velit eu esse sunt. Laborum irure aliqua reprehenderit velit ipsum fugiat officia dolor ut aute officia deserunt. Ipsum sit quis fugiat nostrud aliqua cupidatat ex pariatur et. Cillum proident est irure nisi dolor aliqua deserunt esse occaecat velit dolor.\r\n", "Exercitation nulla officia sit eiusmod cillum eu incididunt officia exercitation qui Lorem deserunt. Voluptate Lorem minim commodo laborum esse in duis excepteur do duis aliquip nisi voluptate consectetur. Amet tempor officia enim ex esse minim reprehenderit.\r\n", "Laboris sint deserunt ad aute incididunt. Anim officia sunt elit qui laborum labore commodo irure non. Mollit adipisicing ullamco do aute nulla eu laborum et quis sint aute adipisicing amet. Aliqua officia irure nostrud duis ex.\r\n", "Eiusmod ipsum aliqua reprehenderit esse est non aute id veniam eiusmod. 
Elit consequat ad sit tempor elit eu incididunt quis irure ad. Eu incididunt veniam consequat Lorem nostrud cillum officia ea consequat ad cillum. Non nisi irure cupidatat incididunt pariatur incididunt. Duis velit officia ad cillum qui. Aliquip consequat sint aute nisi cillum. Officia commodo nisi incididunt laborum nisi voluptate aliquip Lorem cupidatat anim consequat sit laboris.\r\n", "Veniam cupidatat et incididunt mollit do ex voluptate veniam nostrud labore esse. Eiusmod irure sint fugiat esse. Aute irure consectetur ut mollit nulla sint esse. Lorem ut quis ex proident nostrud mollit nostrud ea duis duis in magna anim consectetur.\r\n", "Irure culpa esse qui do dolor fugiat veniam ad. Elit commodo aute elit magna incididunt tempor pariatur velit irure pariatur cillum et ea ad. Ad consequat ea et ad minim ut sunt qui commodo voluptate. Laboris est aliquip anim reprehenderit eu officia et exercitation. Occaecat laboris cupidatat Lorem ullamco in nostrud commodo ipsum in quis esse ex.\r\n", "Incididunt officia quis voluptate eiusmod esse nisi ipsum quis commodo. Eiusmod dolore tempor occaecat sit exercitation aliqua minim consequat minim mollit qui ad nisi. Aute quis irure adipisicing veniam nisi nisi velit deserunt incididunt anim nostrud.\r\n", "Voluptate exercitation exercitation id minim excepteur excepteur mollit. Fugiat aute proident nulla ullamco ea. Nisi ea culpa duis dolore veniam anim tempor officia in dolore exercitation exercitation. Dolore quis cillum adipisicing sunt do nulla esse proident ad sint.\r\n", "Laborum ut mollit sint commodo nulla laborum deserunt Lorem magna commodo mollit tempor deserunt ut. Qui aliquip commodo ea id. Consectetur dolor fugiat dolor excepteur eiusmod. Eu excepteur ex aute ex ex elit ex esse officia cillum exercitation. Duis ut labore ea nostrud excepteur. Reprehenderit labore aute sunt nisi quis Lorem officia. 
Ad aliquip cupidatat voluptate exercitation voluptate ad irure magna quis.\r\n", "Tempor velit veniam sit labore elit minim do elit cillum eiusmod sunt excepteur nisi. Aliquip est deserunt excepteur duis fugiat incididunt veniam fugiat. Pariatur sit irure labore et minim non. Cillum quis aute anim sint laboris laboris ullamco exercitation nostrud. Nulla pariatur id laborum minim nisi est adipisicing irure.\r\n", "Irure exercitation laboris nostrud in do consectetur ad. Magna aliqua Lorem culpa exercitation sint do culpa incididunt mollit eu exercitation. Elit tempor Lorem dolore enim deserunt. Anim et ullamco sint ullamco mollit cillum officia et. Proident incididunt laboris aliquip laborum sint veniam deserunt eu consequat deserunt voluptate laboris. Anim Lorem non laborum exercitation voluptate. Cupidatat reprehenderit culpa Lorem fugiat enim minim consectetur tempor quis ad reprehenderit laboris irure.\r\n", "Deserunt elit mollit nostrud occaecat labore reprehenderit laboris ex. Esse reprehenderit adipisicing cillum minim in esse aliquip excepteur ex et nisi cillum quis. Cillum labore ut ex sunt. Occaecat proident et mollit magna consequat irure esse. Dolor do enim esse nisi ad.\r\n", "Pariatur est anim cillum minim elit magna adipisicing quis tempor proident nisi laboris incididunt cupidatat. Nulla est adipisicing sit adipisicing id nostrud amet qui consequat eiusmod tempor voluptate ad. Adipisicing non magna sit occaecat magna mollit ad ex nulla velit ea pariatur. Irure labore ad ea exercitation ex cillum.\r\n", "Lorem fugiat eu eu cillum nulla tempor sint. Lorem id officia nulla velit labore ut duis ad tempor non. Excepteur quis aute adipisicing nisi nisi consectetur aliquip enim Lorem id ullamco cillum sint voluptate. Qui aliquip incididunt tempor aliqua voluptate labore reprehenderit. Veniam eiusmod elit occaecat voluptate tempor culpa consectetur ea ut exercitation eiusmod exercitation qui.\r\n", "Aliqua esse pariatur nulla veniam velit ea. 
Aliquip consectetur tempor ex magna sit aliquip exercitation veniam. Dolor ullamco minim commodo pariatur. Et amet reprehenderit dolore proident elit tempor eiusmod eu incididunt enim ullamco. Adipisicing id officia incididunt esse dolor sunt cupidatat do deserunt mollit do non. Magna ut officia fugiat adipisicing quis ea cillum laborum dolore ad nostrud magna minim est. Dolor voluptate officia proident enim ea deserunt eu voluptate dolore proident laborum officia ea.\r\n", "Culpa aute consequat esse fugiat cupidatat minim voluptate voluptate eiusmod irure anim elit. Do eiusmod culpa laboris consequat incididunt minim nostrud eiusmod commodo velit ea ullamco proident. Culpa pariatur magna ut mollit nisi. Ea officia do magna deserunt minim nisi tempor ea deserunt veniam cillum exercitation esse.\r\n", "Anim ullamco nostrud commodo Lorem. Do sunt laborum exercitation proident proident magna. Lorem officia laborum laborum dolor sunt duis commodo Lorem. Officia aute adipisicing ea cupidatat ea dolore. Aliquip adipisicing pariatur consectetur aliqua sit amet officia reprehenderit laborum culpa. Occaecat Lorem eu nisi do Lorem occaecat enim eiusmod laboris id quis. Ad mollit adipisicing sunt adipisicing esse.\r\n", "Laborum quis sit adipisicing cupidatat. Veniam Lorem eiusmod esse esse sint nisi labore elit et. Deserunt aliqua mollit ut commodo aliqua non incididunt ipsum reprehenderit consectetur. Eiusmod nulla minim laboris Lorem ea Lorem aute tempor pariatur in sit. Incididunt culpa ut do irure amet irure cupidatat est anim anim culpa occaecat. Est velit consectetur eiusmod veniam reprehenderit officia sunt occaecat eiusmod ut sunt occaecat amet.\r\n", "Elit minim aute fugiat nulla ex quis. Labore fugiat sint nostrud amet quis culpa excepteur in. Consectetur exercitation cupidatat laborum sit. Aute nisi eu aliqua est deserunt eiusmod commodo dolor id. Mollit laborum esse sint ipsum voluptate reprehenderit velit et. 
Veniam aliquip enim in veniam Lorem voluptate quis deserunt consequat qui commodo ut excepteur aute.\r\n", "Dolore deserunt veniam aute nisi labore sunt et voluptate irure nisi anim ea. Magna nisi quis anim mollit nisi est dolor do ex aliquip elit aliquip ipsum minim. Dolore est officia nostrud eiusmod ex laborum ea amet est. Officia culpa non est et tempor consectetur exercitation tempor eiusmod enim. Ea tempor laboris qui amet ex nisi culpa dolore consectetur incididunt sunt sunt. Lorem aliquip incididunt magna do et ullamco ex elit aliqua eiusmod qui. Commodo amet dolor sint incididunt ex veniam non Lorem fugiat.\r\n", "Officia culpa enim voluptate dolore commodo. Minim commodo aliqua minim ex sint excepteur cupidatat adipisicing eu irure. Anim magna deserunt anim Lorem non.\r\n", "Cupidatat aliquip nulla excepteur sunt cupidatat cupidatat laborum cupidatat exercitation. Laboris minim ex cupidatat culpa elit. Amet enim reprehenderit aliqua laborum est tempor exercitation cupidatat ex dolore do. Do incididunt labore fugiat commodo consectetur nisi incididunt irure sit culpa sit. Elit aute occaecat qui excepteur velit proident cillum qui aliqua ex do ex. Dolore irure ex excepteur veniam id proident mollit Lorem.\r\n", "Ad commodo cillum duis deserunt elit officia consectetur veniam eiusmod. Reprehenderit et veniam ad commodo reprehenderit magna elit laboris sunt non quis. Adipisicing dolor aute proident ea magna sunt et proident in consectetur.\r\n", "Veniam exercitation esse esse veniam est nisi. Minim velit incididunt sint aute dolor anim. Fugiat cupidatat id ad nisi in voluptate dolor culpa eiusmod magna eiusmod amet id. Duis aliquip labore et ex amet amet aliquip laborum eiusmod ipsum. Quis qui ut duis duis. Minim in voluptate reprehenderit aliqua.\r\n", "Elit ut pariatur dolor veniam ipsum consequat. Voluptate Lorem mollit et esse dolore mollit Lorem ad. Elit nostrud eu Lorem labore mollit minim cupidatat officia quis minim dolore incididunt. 
In cillum aute cillum ut.\r\n", "Commodo laborum deserunt ut cupidatat pariatur ullamco in esse anim exercitation cillum duis. Consectetur incididunt sit esse Lorem in aute. Eiusmod mollit Lorem consequat minim reprehenderit laborum enim excepteur irure nisi elit. Laborum esse proident aute aute proident adipisicing laborum. Pariatur tempor duis incididunt qui velit pariatur ut officia ea mollit labore dolore. Cillum pariatur minim ullamco sunt incididunt culpa id ullamco exercitation consectetur. Ea exercitation consequat reprehenderit ut ullamco velit eu ad velit magna excepteur eiusmod.\r\n", "Eu deserunt magna laboris laborum laborum in consequat dolore. Officia proident consectetur proident do occaecat minim pariatur officia ipsum sit non velit officia cillum. Laborum excepteur labore eu minim eiusmod. Sit anim dolore cillum ad do minim culpa sit est ad.\r\n", "Cupidatat dolor nostrud Lorem sint consequat quis. Quis labore sint incididunt officia tempor. Fugiat nostrud in elit reprehenderit dolor. Nisi sit enim officia minim est adipisicing nulla aute labore nulla nostrud cupidatat est. Deserunt dolore qui irure Lorem esse voluptate velit qui nostrud.\r\n", "Fugiat Lorem amet nulla nisi qui amet laboris enim cillum. Dolore occaecat exercitation id labore velit do commodo ut cupidatat laborum velit fugiat mollit. Ut et aliqua pariatur occaecat. Lorem occaecat dolore quis esse enim cupidatat exercitation ut tempor sit laboris fugiat adipisicing. Est tempor ex irure consectetur ipsum magna labore. Lorem non quis qui minim nisi magna amet aliquip ex cillum fugiat tempor.\r\n", "Aliquip eiusmod laborum ipsum deserunt velit esse do magna excepteur consectetur exercitation sit. Minim ullamco reprehenderit commodo nostrud exercitation id irure ex qui ullamco sit esse laboris. Nulla cillum non minim qui cillum nisi aute proident. Dolor anim culpa elit quis excepteur aliqua eiusmod. 
Elit ea est excepteur consectetur sunt eiusmod enim id commodo irure amet et pariatur laboris. Voluptate magna ad magna dolore cillum cillum irure laboris ipsum officia id Lorem veniam.\r\n", "Esse sunt elit est aliquip cupidatat commodo deserunt. Deserunt pariatur ipsum qui ad esse esse magna qui cillum laborum. Exercitation veniam pariatur elit amet enim.\r\n", "Esse quis in id elit nulla occaecat incididunt. Et amet Lorem mollit in veniam do. Velit mollit Lorem consequat commodo Lorem aliquip cupidatat. Minim consequat nostrud nulla in nostrud.\r\n", "Cillum nulla et eu est nostrud quis elit cupidatat dolor enim excepteur exercitation nisi voluptate. Nulla dolore non ex velit et qui tempor proident id deserunt nisi eu. Tempor ad Lorem ipsum reprehenderit in anim. Anim dolore ullamco enim deserunt quis ex id exercitation velit. Magna exercitation fugiat mollit pariatur ipsum ex consectetur nostrud. Id dolore officia nostrud excepteur laborum. Magna incididunt elit ipsum pariatur adipisicing enim duis est qui commodo velit aute.\r\n", "Quis esse ex qui nisi dolor. Ullamco laborum dolor esse laboris eiusmod ea magna laboris ea esse ut. Dolore ipsum pariatur veniam sint mollit. Lorem ea proident fugiat ullamco ut nisi culpa eu exercitation exercitation aliquip veniam laborum consectetur.\r\n", "Pariatur veniam laboris sit aliquip pariatur tempor aute sunt id et ut. Laboris excepteur eiusmod nisi qui quis elit enim ut cupidatat. Et et laborum in fugiat veniam consectetur ipsum laboris duis excepteur ullamco aliqua dolor Lorem. Aliqua ex amet sint anim cupidatat nisi ipsum anim et sunt deserunt. Occaecat culpa ut tempor cillum pariatur ex tempor.\r\n", "Dolor deserunt eiusmod magna do officia voluptate excepteur est cupidatat. Veniam qui cupidatat amet anim est qui consectetur sit commodo commodo ea ad. Enim ad adipisicing qui nostrud. Non nulla esse ullamco nulla et ex.\r\n", "Id ullamco ea consectetur est incididunt deserunt et esse. 
Elit nostrud voluptate eiusmod ut. Excepteur adipisicing qui cupidatat consequat labore id. Qui dolor aliqua do dolore do cupidatat labore ex consectetur ea sit cillum. Sint veniam eiusmod in consectetur consequat fugiat et mollit ut fugiat esse dolor adipisicing.\r\n", "Ea magna proident labore duis pariatur. Esse cillum aliquip dolor duis fugiat ea ex officia ea irure. Sint elit nisi pariatur sunt nostrud exercitation ullamco culpa magna do.\r\n", "Minim aliqua voluptate dolor consequat sint tempor deserunt amet magna excepteur. Irure do voluptate magna velit. Nostrud in reprehenderit magna officia nostrud. Cupidatat nulla irure laboris non fugiat ex ex est cupidatat excepteur officia aute velit duis. Sit voluptate id ea exercitation deserunt culpa voluptate nostrud est adipisicing incididunt. Amet proident laborum commodo magna ipsum quis.\r\n", "Ipsum consectetur consectetur excepteur tempor eiusmod ea fugiat aute velit magna in officia sunt. Sit ut sunt dolore cupidatat dolor adipisicing. Veniam nisi adipisicing esse reprehenderit amet aliqua voluptate ex commodo occaecat est voluptate mollit sunt. Pariatur aliqua qui qui in dolor. Fugiat reprehenderit sit nostrud do sint esse. Tempor sit irure adipisicing ea pariatur duis est sit est incididunt laboris quis do. Et voluptate anim minim aliquip excepteur consequat nisi anim pariatur aliquip ut ipsum dolor magna.\r\n", "Cillum sit labore excepteur magna id aliqua exercitation consequat laborum Lorem id pariatur nostrud. Lorem qui est labore sint cupidatat sint excepteur nulla in eu aliqua et. Adipisicing velit do enim occaecat laboris quis excepteur ipsum dolor occaecat Lorem dolore id exercitation.\r\n", "Incididunt in laborum reprehenderit eiusmod irure ex. Elit duis consequat minim magna. Esse consectetur aliquip cillum excepteur excepteur fugiat. Sint tempor consequat minim reprehenderit consectetur adipisicing dolor id Lorem elit non. Occaecat esse quis mollit ea et sint aute fugiat qui tempor. 
Adipisicing tempor duis non dolore irure elit deserunt qui do.\r\n", "Labore fugiat eiusmod sint laborum sit duis occaecat. Magna in laborum non cillum excepteur nostrud sit proident pariatur voluptate voluptate adipisicing exercitation occaecat. Ad non dolor aute ex sint do do minim exercitation veniam laborum irure magna ea. Magna do non quis sit consequat Lorem aliquip.\r\n", "Velit anim do laborum laboris laborum Lorem. Sunt do Lorem amet ipsum est sint velit sit do voluptate mollit veniam enim. Commodo do deserunt in pariatur ut elit sint elit deserunt ea. Ad dolor anim consequat aliquip ut mollit nostrud tempor sunt mollit elit. Reprehenderit laboris labore excepteur occaecat veniam adipisicing cupidatat esse. Ad enim aliquip ea minim excepteur magna. Sint velit veniam pariatur qui dolor est adipisicing ex laboris.\r\n", "Ea cupidatat ex nulla in sunt est sit dolor enim ad. Eu tempor consequat cupidatat consequat ex incididunt sint culpa. Est Lorem Lorem non cupidatat sunt ut aliqua non nostrud do ullamco. Reprehenderit ad ad nulla nostrud do nulla in. Ipsum adipisicing commodo mollit ipsum exercitation. Aliqua ea anim anim est elit. Ea incididunt consequat minim ad sunt eu cillum.\r\n", "Tempor quis excepteur eiusmod cupidatat ipsum occaecat id et occaecat. Eiusmod magna aliquip excepteur id amet elit. Ullamco dolore amet anim dolor enim ea magna magna elit. Occaecat magna pariatur in deserunt consectetur officia aliquip ullamco ex aute anim. Minim laborum eu sit elit officia esse do irure pariatur tempor et reprehenderit ullamco labore.\r\n", "Sit tempor eu minim dolore velit pariatur magna duis reprehenderit ea nulla in. Amet est do consectetur commodo do adipisicing adipisicing in amet. Cillum id ut commodo do pariatur duis aliqua nisi sint ad irure officia reprehenderit. Mollit labore id enim fugiat ullamco irure mollit cupidatat. Quis nisi amet labore eu dolor occaecat commodo aliqua laboris deserunt excepteur deserunt officia. Aliqua non ut sit ad. 
Laborum veniam ad velit minim dolore ea id magna dolor qui in.\r\n", "Dolore nostrud ipsum aliqua pariatur id reprehenderit enim ad eiusmod qui. Deserunt anim commodo pariatur excepteur velit eu irure nulla ex labore ipsum aliqua minim aute. Id consequat amet tempor aliquip ex elit adipisicing est do. Eu enim Lorem consectetur minim id irure nulla culpa. Consectetur do consequat aute tempor anim. Qui ad non elit dolor est adipisicing nisi amet cillum sunt quis anim laboris incididunt. Incididunt proident adipisicing labore Lorem.\r\n", "Et reprehenderit ea officia veniam. Aliquip ullamco consequat elit nisi magna mollit id elit. Amet amet sint velit labore ad nisi. Consectetur tempor id dolor aliqua esse deserunt amet. Qui laborum enim proident voluptate aute eu aute aute sit sit incididunt eu. Sunt ullamco nisi nostrud labore commodo non consectetur quis do duis minim irure. Tempor sint dolor sint aliquip dolore nostrud fugiat.\r\n", "Aute ullamco quis nisi ut excepteur nostrud duis elit. Veniam ex ad incididunt veniam voluptate. Commodo dolore ullamco sit sint adipisicing proident amet aute duis deserunt.\r\n", "Labore velit eu cillum nisi. Laboris do cupidatat et non duis cillum. Ullamco dolor tempor cupidatat voluptate laborum ullamco ea duis.\r\n", "Deserunt consequat aliqua duis aliquip nostrud nostrud dolore nisi. Culpa do sint laborum consectetur ipsum quis laborum laborum pariatur eiusmod. Consectetur laboris ad ad ut quis. Ullamco laboris qui velit id laborum voluptate qui aute nostrud aliquip ea.\r\n", "Ad cillum anim ex est consectetur mollit id in. Non enim aliquip consequat qui deserunt commodo cillum ad laborum fugiat. Dolor deserunt amet laborum tempor adipisicing voluptate dolor pariatur dolor cillum. Eu mollit ex sunt officia veniam qui est sunt proident. Non aliqua qui elit eu cupidatat ex enim ex proident. Lorem sit minim ullamco officia cupidatat duis minim. 
Exercitation laborum deserunt voluptate culpa tempor quis nulla id pariatur.\r\n", "Nostrud quis consectetur ut aliqua excepteur elit consectetur occaecat. Occaecat voluptate Lorem pariatur consequat ullamco fugiat minim. Anim voluptate eu eu cillum tempor dolore aliquip aliqua. Fugiat incididunt ut tempor amet minim. Voluptate nostrud minim pariatur non excepteur ullamco.\r\n", "Dolore nulla velit officia exercitation irure laboris incididunt anim in laborum in fugiat ut proident. Fugiat aute id consequat fugiat officia ut. Labore sint amet proident amet sint nisi laboris amet id ullamco culpa quis consequat proident. Magna do fugiat veniam dolore elit irure minim. Esse ullamco excepteur labore tempor labore fugiat dolore nisi cupidatat irure dolor pariatur. Magna excepteur laboris nisi eiusmod sit pariatur mollit.\r\n", "In enim aliquip officia ea ad exercitation cillum culpa occaecat dolore Lorem. Irure cillum commodo adipisicing sunt pariatur ea duis fugiat exercitation laboris culpa ullamco aute. Ut voluptate exercitation qui dolor. Irure et duis elit consequat deserunt proident.\r\n", "Officia ea Lorem sunt culpa id et tempor excepteur enim deserunt proident. Dolore aliquip dolor laboris cillum proident velit. Et culpa occaecat exercitation cupidatat irure sint adipisicing excepteur pariatur incididunt ad occaecat. Qui proident ipsum cillum minim. Quis ut culpa irure aliqua minim fugiat. In voluptate cupidatat fugiat est laborum dolor esse in pariatur voluptate.\r\n", "Voluptate enim ipsum officia aute ea adipisicing nisi ut ex do aliquip amet. Reprehenderit enim voluptate tempor ex adipisicing culpa. Culpa occaecat voluptate dolor mollit ipsum exercitation labore et tempor sit ea consectetur aliqua. Elit elit sit minim ea ea commodo do tempor cupidatat irure dolore. Occaecat esse adipisicing anim eiusmod commodo fugiat mollit amet. Incididunt tempor tempor qui occaecat cupidatat in.\r\n", "Ut qui anim velit enim aliquip do ut nulla labore. 
Mollit ut commodo ut eiusmod consectetur laboris aliqua qui voluptate culpa fugiat incididunt elit. Lorem ullamco esse elit elit. Labore amet incididunt ea nulla aliquip eiusmod. Sit nulla est voluptate officia ipsum aute aute cillum tempor deserunt. Laboris commodo eiusmod labore sunt aute excepteur ea consectetur reprehenderit veniam nisi. Culpa nisi sint sunt sint tempor laboris dolore cupidatat.\r\n", "Duis cillum qui nisi duis amet velit ad cillum ut elit aute sint ad. Amet laboris pariatur excepteur ipsum Lorem aliqua veniam Lorem quis mollit cupidatat aliqua exercitation. Pariatur ex ullamco sit commodo cillum eiusmod ut proident elit cillum. Commodo ut ipsum excepteur occaecat sint elit consequat ex dolor adipisicing consectetur id ut ad. Velit sit eiusmod est esse tempor incididunt consectetur eiusmod duis commodo veniam.\r\n", "Ut sunt qui officia anim laboris exercitation Lorem quis laborum do eiusmod officia. Enim consectetur occaecat fugiat cillum cillum. Dolore dolore nostrud in commodo fugiat mollit consequat occaecat non et et elit ullamco. Sit voluptate minim ut est culpa velit nulla fugiat reprehenderit eu aliquip adipisicing labore. Sit minim minim do dolor dolor. Lorem Lorem labore exercitation magna veniam eiusmod do.\r\n", "Fugiat dolor adipisicing quis aliquip aute dolore. Qui proident anim elit veniam ex aliquip eiusmod ipsum sunt pariatur est. Non fugiat duis do est officia adipisicing.\r\n", "Nulla deserunt do laboris cupidatat veniam do consectetur ipsum elit veniam in mollit eu. Ea in consequat cupidatat laboris sint fugiat irure. In commodo esse reprehenderit deserunt minim velit ullamco enim eu cupidatat tempor ex. Ullamco in non id culpa amet occaecat culpa nostrud id. Non occaecat culpa magna incididunt.\r\n", "Enim laboris ex mollit reprehenderit eiusmod exercitation magna. Exercitation Lorem ex mollit non non culpa labore enim. 
Adipisicing labore dolore incididunt do amet aliquip excepteur ad et nostrud officia aute veniam voluptate. Fugiat enim eiusmod Lorem esse. Minim ullamco commodo consequat ex commodo aliqua eu nulla eu. Veniam non enim nulla ut Lorem nostrud minim sint duis.\r\n", "Enim duis consectetur in ullamco cillum veniam nulla amet. Exercitation nisi sunt sunt duis in culpa nisi magna ex id ipsum laboris reprehenderit qui. Officia pariatur qui ex fugiat veniam et sunt sit nostrud. Veniam ullamco tempor fugiat minim Lorem proident velit in eiusmod elit. Enim minim excepteur aute aliquip ex magna commodo dolore qui et labore. Proident eu aliquip cillum dolor. Nostrud ipsum ut irure consequat fugiat nulla proident occaecat laborum.\r\n", "Amet duis eiusmod sunt adipisicing esse ex nostrud consectetur voluptate cillum. Ipsum occaecat sit et anim velit irure ea incididunt cupidatat ullamco in nisi quis. Esse officia ipsum commodo qui quis qui do. Commodo aliquip amet aute sit sit ut cupidatat elit nostrud.\r\n", "Laboris laboris sit mollit cillum nulla deserunt commodo culpa est commodo anim id anim sit. Officia id consectetur velit incididunt est dolor sunt ipsum magna aliqua consectetur. Eiusmod pariatur minim deserunt cupidatat veniam Lorem aliquip sunt proident eu Lorem sit dolor fugiat. Proident qui ut ex in incididunt nulla nulla dolor ex laboris ea ad.\r\n", "Ex incididunt enim labore nulla cupidatat elit. Quis ut incididunt incididunt non irure commodo do mollit cillum anim excepteur. Qui consequat laborum dolore elit tempor aute ut nulla pariatur eu ullamco veniam. Nisi non velit labore in commodo excepteur culpa nulla tempor cillum. Ipsum qui sit sint reprehenderit ut labore incididunt dolor aliquip sunt. Reprehenderit occaecat tempor nisi laborum.\r\n", "Lorem officia ullamco eu occaecat in magna eiusmod consectetur nisi aliqua mollit esse. Ullamco ex aute nostrud pariatur do enim cillum sint do fugiat nostrud culpa tempor. 
Do aliquip excepteur nostrud culpa eu pariatur eiusmod cillum excepteur do. Est sunt non quis cillum voluptate ex.\r\n", "Deserunt consectetur tempor irure mollit qui tempor et. Labore enim eu irure laboris in. Nisi in tempor ex occaecat amet cupidatat laboris occaecat amet minim ut magna incididunt id. Consequat cillum laborum commodo mollit. Et magna culpa sunt dolore consequat laboris et sit. Deserunt qui voluptate excepteur dolor. Eu qui amet est proident.\r\n", "Eu elit minim eiusmod occaecat eu nostrud dolor qui ut elit. Sunt dolore proident ea eu do eiusmod fugiat incididunt pariatur duis amet Lorem nisi ut. Adipisicing quis veniam cupidatat Lorem sint culpa sunt veniam sint. Excepteur eu exercitation est magna pariatur veniam dolore qui fugiat labore proident eiusmod cillum. Commodo reprehenderit elit proident duis sint magna.\r\n", "Ut aliquip pariatur deserunt nostrud commodo ad proident est exercitation. Sit minim do ea enim sint officia nisi incididunt laborum. Ex amet duis commodo fugiat. Ut aute tempor deserunt irure occaecat aliquip voluptate cillum aute elit qui nostrud.\r\n", "Irure et quis consectetur sit est do sunt aliquip eu. Cupidatat pariatur consequat dolore consectetur. Adipisicing magna velit mollit occaecat do id. Nisi pariatur cupidatat cillum incididunt excepteur consectetur excepteur do laborum deserunt irure pariatur cillum.\r\n", "Adipisicing esse incididunt cillum est irure consequat irure ad aute voluptate. Incididunt do occaecat nostrud do ipsum pariatur Lorem qui laboris et pariatur. Est exercitation dolor culpa ad velit ut et.\r\n", "Sit eiusmod id enim ad ex dolor pariatur do. Ullamco occaecat quis dolor minim non elit labore amet est. Commodo velit eu nulla eiusmod ullamco. Incididunt anim pariatur aute eiusmod veniam tempor enim officia elit id. Elit Lorem est commodo dolore nostrud. Labore et consectetur do exercitation veniam laboris incididunt aliqua proident dolore ea officia cupidatat. 
Velit laboris aliquip deserunt labore commodo.\r\n", "Proident nostrud labore eu nostrud. Excepteur ut in velit labore ea proident labore ea sint cillum. Incididunt ipsum consectetur officia irure sit pariatur veniam id velit officia mollit. Adipisicing magna voluptate velit excepteur enim consectetur incididunt voluptate tempor occaecat fugiat velit excepteur labore. Do do incididunt qui nisi voluptate enim. Laboris aute sit voluptate cillum pariatur minim excepteur ullamco mollit deserunt.\r\n", "Excepteur laborum adipisicing nisi elit fugiat tempor. Elit laboris qui enim labore duis. Proident tempor in consectetur proident excepteur do ex laboris sit.\r\n", "Dolore do ea incididunt do duis dolore eu labore nisi cupidatat voluptate amet incididunt minim. Nulla pariatur mollit cupidatat adipisicing nulla et. Dolor aliquip in ex magna excepteur. Nulla consequat minim consequat ullamco dolor laboris ullamco eu reprehenderit duis nostrud pariatur.\r\n", "Id nisi labore duis qui. Incididunt laboris tempor aute do sit. Occaecat excepteur est mollit ea in mollit ullamco est amet reprehenderit.\r\n", "Aute labore ipsum velit non voluptate eiusmod et reprehenderit cupidatat occaecat. Lorem tempor tempor consectetur exercitation qui nostrud sunt cillum quis ut non dolore. Reprehenderit consequat reprehenderit laborum qui pariatur anim et officia est cupidatat enim velit velit.\r\n", "Commodo ex et fugiat cupidatat non adipisicing commodo. Minim ad dolore fugiat mollit cupidatat aliqua sunt dolor sit. Labore esse labore velit aute enim. 
Nulla duis incididunt est aliquip consectetur elit qui incididunt minim minim labore amet sit cillum.\r\n" ] ================================================ FILE: C/thirdparty/rapidjson/bin/types/readme.txt ================================================ Test data obtained from https://github.com/xpol/lua-rapidjson/tree/master/performance ================================================ FILE: C/thirdparty/rapidjson/contrib/natvis/LICENSE ================================================ The MIT License (MIT) Copyright (c) 2017 Bart Muzzin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
Derived from: The MIT License (MIT) Copyright (c) 2015 mojmir svoboda Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: C/thirdparty/rapidjson/contrib/natvis/README.md ================================================ # rapidjson.natvis This file can be used as a [Visual Studio Visualizer](https://docs.microsoft.com/en-gb/visualstudio/debugger/create-custom-views-of-native-objects) to aid in visualizing rapidjson structures within the Visual Studio debugger. Natvis visualizers are supported in Visual Studio 2012 and later. To install, copy the file into this directory: `%USERPROFILE%\Documents\Visual Studio 2012\Visualizers` Each version of Visual Studio has a similar directory, it must be copied into each directory to be used with that particular version. In Visual Studio 2015 and later, this can be done without restarting Visual Studio (a new debugging session must be started). 
================================================ FILE: C/thirdparty/rapidjson/contrib/natvis/rapidjson.natvis ================================================ null true false {(const Ch*)data_.ss.str,na} {(const Ch*)((size_t)data_.s.str & 0x0000FFFFFFFFFFFF),[data_.s.length]na} {data_.n.i.i} {data_.n.u.u} {data_.n.i64} {data_.n.u64} {data_.n.d} Object members={data_.o.size} Array members={data_.a.size} data_.o.size data_.o.capacity data_.o.size (rapidjson::GenericMember<$T1,$T2>*)(((size_t)data_.o.members) & 0x0000FFFFFFFFFFFF) data_.a.size data_.a.capacity data_.a.size (rapidjson::GenericValue<$T1,$T2>*)(((size_t)data_.a.elements) & 0x0000FFFFFFFFFFFF) ================================================ FILE: C/thirdparty/rapidjson/doc/CMakeLists.txt ================================================ find_package(Doxygen) IF(NOT DOXYGEN_FOUND) MESSAGE(STATUS "No Doxygen found. Documentation won't be built") ELSE() file(GLOB SOURCES ${CMAKE_CURRENT_LIST_DIR}/../include/*) file(GLOB MARKDOWN_DOC ${CMAKE_CURRENT_LIST_DIR}/../doc/*.md) list(APPEND MARKDOWN_DOC ${CMAKE_CURRENT_LIST_DIR}/../readme.md) CONFIGURE_FILE(Doxyfile.in Doxyfile @ONLY) CONFIGURE_FILE(Doxyfile.zh-cn.in Doxyfile.zh-cn @ONLY) file(GLOB DOXYFILES ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile*) add_custom_command(OUTPUT html COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile.zh-cn COMMAND ${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/html DEPENDS ${MARKDOWN_DOC} ${SOURCES} ${DOXYFILES} WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/../ ) add_custom_target(doc ALL DEPENDS html) install(DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/html DESTINATION ${DOC_INSTALL_DIR} COMPONENT doc) ENDIF() ================================================ FILE: C/thirdparty/rapidjson/doc/Doxyfile.in ================================================ # Doxyfile 1.8.7 # This file describes the settings to be used by the documentation system # doxygen 
(www.doxygen.org) for a project. # # All text after a double hash (##) is considered a comment and is placed in # front of the TAG it is preceding. # # All text after a single hash (#) is considered a comment and will be ignored. # The format is: # TAG = value [value, ...] # For lists, items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (\" \"). #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all text # before the first occurrence of this tag. Doxygen uses libiconv (or the iconv # built into libc) for the transcoding. See http://www.gnu.org/software/libiconv # for the list of possible encodings. # The default value is: UTF-8. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded by # double-quotes, unless you are using Doxywizard) that should identify the # project for which the documentation is generated. This name is used in the # title of most generated pages and in a few other places. # The default value is: My Project. PROJECT_NAME = RapidJSON # The PROJECT_NUMBER tag can be used to enter a project or revision number. This # could be handy for archiving the generated documentation or if some version # control system is used. PROJECT_NUMBER = # Using the PROJECT_BRIEF tag one can provide an optional one line description # for a project that appears at the top of each page and should give viewer a # quick idea about the purpose of the project. Keep the description short. PROJECT_BRIEF = "A fast JSON parser/generator for C++ with both SAX/DOM style API" # With the PROJECT_LOGO tag one can specify an logo or icon that is included in # the documentation. 
The maximum height of the logo should not exceed 55 pixels # and the maximum width should not exceed 200 pixels. Doxygen will copy the logo # to the output directory. PROJECT_LOGO = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path # into which the generated documentation will be written. If a relative path is # entered, it will be relative to the location where doxygen was started. If # left blank the current directory will be used. OUTPUT_DIRECTORY = @CMAKE_CURRENT_BINARY_DIR@ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. Enabling this # option can be useful when feeding doxygen a huge amount of source files, where # putting all generated files in the same directory would otherwise causes # performance problems for the file system. # The default value is: NO. CREATE_SUBDIRS = NO # If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII # characters to appear in the names of generated files. If set to NO, non-ASCII # characters will be escaped, for example _xE3_x81_x84 will be used for Unicode # U+3044. # The default value is: NO. ALLOW_UNICODE_NAMES = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. 
# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese, # Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States), # Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian, # Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en (Korean with English messages), Latvian, Lithuanian, # Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian, # Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish, # Ukrainian and Vietnamese. # The default value is: English. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. # The default value is: YES. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator that is # used to form the text in various listings. Each string in this list, if found # as the leading text of the brief description, will be stripped from the text # and the result, after processing the whole list, is used as the annotated # text. Otherwise, the brief description is used as-is. If left blank, the # following values are used ($name is automatically replaced with the name of # the entity):The $name class, The $name widget, The $name file, is, provides, # specifies, contains, represents, a, an and the. 
ABBREVIATE_BRIEF = "The $name class" \ "The $name widget" \ "The $name file" \ is \ provides \ specifies \ contains \ represents \ a \ an \ the # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # doxygen will generate a detailed section even if there is only a brief # description. # The default value is: NO. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. # The default value is: NO. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. FULL_PATH_NAMES = YES # The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. # Stripping is only done if one of the specified strings matches the left-hand # part of the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the path to # strip. # # Note that you can specify absolute paths here, but also relative paths, which # will be relative from the directory where doxygen is started. # This tag requires that the tag FULL_PATH_NAMES is set to YES. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the # path mentioned in the documentation of a class, which tells the reader which # header file to include in order to use a class. If left blank only the name of # the header file containing the class definition is used. Otherwise one should # specify the list of include paths that are normally passed to the compiler # using the -I flag. 
STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but # less readable) file names. This can be useful is your file systems doesn't # support long names like on DOS, Mac, or CD-ROM. # The default value is: NO. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the # first line (until the first dot) of a Javadoc-style comment as the brief # description. If set to NO, the Javadoc-style will behave just like regular Qt- # style comments (thus requiring an explicit @brief command for a brief # description.) # The default value is: NO. JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first # line (until the first dot) of a Qt-style comment as the brief description. If # set to NO, the Qt-style will behave just like regular Qt-style comments (thus # requiring an explicit \brief command for a brief description.) # The default value is: NO. QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a # multi-line C++ special comment block (i.e. a block of //! or /// comments) as # a brief description. This used to be the default behavior. The new default is # to treat a multi-line C++ comment block as a detailed description. Set this # tag to YES if you prefer the old behavior instead. # # Note that setting this tag to YES also means that rational rose comments are # not recognized any more. # The default value is: NO. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the # documentation from any documented member that it re-implements. # The default value is: YES. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a # new page for each member. If set to NO, the documentation of a member will be # part of the file/class/namespace that contains it. # The default value is: NO. 
SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen # uses this value to replace tabs by spaces in code fragments. # Minimum value: 1, maximum value: 16, default value: 4. TAB_SIZE = 4 # This tag can be used to specify a number of aliases that act as commands in # the documentation. An alias has the form: # name=value # For example adding # "sideeffect=@par Side Effects:\n" # will allow you to put the command \sideeffect (or @sideeffect) in the # documentation, which will result in a user-defined paragraph with heading # "Side Effects:". You can put \n's in the value part of an alias to insert # newlines. ALIASES = # This tag can be used to specify a number of word-keyword mappings (TCL only). # A mapping has the form "name=value". For example adding "class=itcl::class" # will allow you to use the command class in the itcl::class meaning. TCL_SUBST = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources # only. Doxygen will then generate output that is more tailored for C. For # instance, some of the names that are used will be different. The list of all # members will be omitted, etc. # The default value is: NO. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or # Python sources only. Doxygen will then generate output that is more tailored # for that language. For instance, namespaces will be presented as packages, # qualified scopes will look different, etc. # The default value is: NO. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources. Doxygen will then generate output that is tailored for Fortran. # The default value is: NO. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for VHDL. # The default value is: NO. 
OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it # parses. With this tag you can assign which parser to use for a given # extension. Doxygen has a built-in mapping, but you can override or extend it # using this tag. The format is ext=language, where ext is a file extension, and # language is one of the parsers supported by doxygen: IDL, Java, Javascript, # C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran: # FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran: # Fortran. In the later case the parser tries to guess whether the code is fixed # or free formatted code, this is the default for Fortran type files), VHDL. For # instance to make doxygen treat .inc files as Fortran files (default is PHP), # and .f files as C (default is Fortran), use: inc=Fortran f=C. # # Note For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. EXTENSION_MAPPING = # If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments # according to the Markdown format, which allows for more readable # documentation. See http://daringfireball.net/projects/markdown/ for details. # The output of markdown processing is further processed by doxygen, so you can # mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in # case of backward compatibilities issues. # The default value is: YES. MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can # be prevented in individual cases by by putting a % sign in front of the word # or globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES # If you use STL classes (i.e. std::string, std::vector, etc.) 
but do not want # to include (a tag file for) the STL sources as input, then you should set this # tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); # versus func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. # The default value is: NO. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. # The default value is: NO. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip (see: # http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen # will parse them like normal C++ but will assume all classes use public instead # of private inheritance when no explicit protection keyword is present. # The default value is: NO. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate # getter and setter methods for a property. Setting this option to YES will make # doxygen to replace the get and set methods by a property in the documentation. # This will only work if the methods are indeed getting or setting a simple # type. If this is not the case, or you want to show the methods anyway, you # should set this option to NO. # The default value is: YES. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). 
Set it to NO to prevent # subgrouping. Alternatively, this can be done per class using the # \nosubgrouping command. # The default value is: YES. SUBGROUPING = YES # When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions # are shown inside the group in which they are included (e.g. using \ingroup) # instead of on a separate page (for HTML and Man pages) or section (for LaTeX # and RTF). # # Note that this feature does not work in combination with # SEPARATE_MEMBER_PAGES. # The default value is: NO. INLINE_GROUPED_CLASSES = YES # When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions # with only public data fields or simple typedef fields will be shown inline in # the documentation of the scope in which they are defined (i.e. file, # namespace, or group documentation), provided this scope is documented. If set # to NO, structs, classes, and unions are shown on a separate page (for HTML and # Man pages) or section (for LaTeX and RTF). # The default value is: NO. INLINE_SIMPLE_STRUCTS = NO # When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or # enum is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically be # useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. # The default value is: NO. TYPEDEF_HIDES_STRUCT = NO # The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This # cache is used to resolve symbols given their name and scope. Since this can be # an expensive process and often the same symbol appears multiple times in the # code, doxygen keeps a cache of pre-resolved symbols. 
If the cache is too small # doxygen will become slower. If the cache is too large, memory is wasted. The # cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range # is 0..9, the default is 0, corresponding to a cache size of 2^16=65536 # symbols. At the end of a run doxygen will report the cache usage and suggest # the optimal cache size from a speed point of view. # Minimum value: 0, maximum value: 9, default value: 0. LOOKUP_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. # Note: This will also disable the warnings about undocumented members that are # normally produced when WARNINGS is set to YES. # The default value is: NO. EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO # If the EXTRACT_PACKAGE tag is set to YES all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined # locally in source files will be included in the documentation. If set to NO # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. 
When set to YES local methods, # which are defined in the implementation section but not in the interface are # included in the documentation. If set to NO only methods in the interface are # included. # The default value is: NO. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base name of # the file that contains the anonymous namespace. By default anonymous namespace # are hidden. # The default value is: NO. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all # undocumented members inside documented classes or files. If set to NO these # members will be included in the various overviews, but no documentation # section is generated. This option has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set # to NO these classes will be included in the various overviews. This option has # no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend # (class|struct|union) declarations. If set to NO these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any # documentation blocks found inside the body of a function. If set to NO these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation that is typed after a # \internal command is included. If the tag is set to NO then the documentation # will be excluded. 
Set it to YES to include the internal documentation. # The default value is: NO. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file # names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. # The default value is: system dependent. CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with # their full class and namespace scopes in the documentation. If set to YES the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. SHOW_INCLUDE_FILES = YES # If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each # grouped member an include statement to the documentation, telling the reader # which file to include in order to use the member. # The default value is: NO. SHOW_GROUPED_MEMB_INC = NO # If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include # files with double quotes in the documentation rather than with sharp brackets. # The default value is: NO. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the # documentation for inline members. # The default value is: YES. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member # name. If set to NO the members will appear in declaration order. # The default value is: YES. 
SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member # name. If set to NO the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the # (brief and detailed) documentation of class members so that constructors and # destructors are listed first. If set to NO the constructors will appear in the # respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS. # Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief # member documentation. # Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting # detailed member documentation. # The default value is: NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy # of group names into alphabetical order. If set to NO the group names will # appear in their defined order. # The default value is: NO. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by # fully-qualified names, including namespaces. If set to NO, the class list will # be sorted only by class name, not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the alphabetical # list. # The default value is: NO. SORT_BY_SCOPE_NAME = NO # If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper # type resolution of all parameters of a function it will reject a match between # the prototype and the implementation of a member function even if there is # only one candidate or it is obvious which candidate to choose by doing a # simple string match. 
By disabling STRICT_PROTO_MATCHING doxygen will still # accept a match between prototype and implementation in such cases. # The default value is: NO. STRICT_PROTO_MATCHING = NO # The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the # todo list. This list is created by putting \todo commands in the # documentation. # The default value is: YES. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the # test list. This list is created by putting \test commands in the # documentation. # The default value is: YES. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional documentation # sections, marked by \if ... \endif and \cond # ... \endcond blocks. ENABLED_SECTIONS = $(RAPIDJSON_SECTIONS) # The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the # initial value of a variable or macro / define can have for it to appear in the # documentation. If the initializer consists of more lines than specified here # it will be hidden. Use a value of 0 to hide initializers completely. The # appearance of the value of individual variables and macros / defines can be # controlled using \showinitializer or \hideinitializer command in the # documentation regardless of this setting. # Minimum value: 0, maximum value: 10000, default value: 30. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at # the bottom of the documentation of classes and structs. 
If set to YES the list # will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES # Set the SHOW_FILES tag to NO to disable the generation of the Files page. This # will remove the Files entry from the Quick Index and from the Folder Tree View # (if specified). # The default value is: YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces # page. This will remove the Namespaces entry from the Quick Index and from the # Folder Tree View (if specified). # The default value is: YES. SHOW_NAMESPACES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command command input-file, where command is the value of the # FILE_VERSION_FILTER tag, and input-file is the name of an input file provided # by doxygen. Whatever the program writes to standard output is used as the file # version. For an example see the documentation. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed # by doxygen. The layout file controls the global structure of the generated # output files in an output format independent way. To create the layout file # that represents doxygen's defaults, run doxygen with the -l option. You can # optionally specify a file name after the option, if omitted DoxygenLayout.xml # will be used as the name of the layout file. # # Note that if you run doxygen from a directory containing a file called # DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE # tag is left empty. LAYOUT_FILE = # The CITE_BIB_FILES tag can be used to specify one or more bib files containing # the reference definitions. This must be a list of .bib files. The .bib # extension is automatically appended if omitted. 
This requires the bibtex tool # to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info. # For LaTeX the style of the bibliography can be controlled using # LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the # search path. Do not use file names with spaces, bibtex cannot handle them. See # also \cite for info how to create references. CITE_BIB_FILES = #--------------------------------------------------------------------------- # Configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated to # standard output by doxygen. If QUIET is set to YES this implies that the # messages are off. # The default value is: NO. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. # The default value is: YES. WARNINGS = YES # If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. WARN_IF_UNDOCUMENTED = YES # If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some parameters # in a documented function, or documenting parameters that don't exist or using # markup commands wrongly. # The default value is: YES. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return # value. If set to NO doxygen will only warn about wrong or incomplete parameter # documentation, but not about the absence of documentation. 
# The default value is: NO. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that doxygen # can produce. The string should contain the $file, $line, and $text tags, which # will be replaced by the file and line number from which the warning originated # and the warning text. Optionally the format may contain $version, which will # be replaced by the version of the file (if it could be obtained via # FILE_VERSION_FILTER) # The default value is: $file:$line: $text. WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning and error # messages should be written. If left blank the output is written to standard # error (stderr). WARN_LOGFILE = #--------------------------------------------------------------------------- # Configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with # spaces. # Note: If this tag is empty the current directory is searched. INPUT = readme.md \ CHANGELOG.md \ include/rapidjson/rapidjson.h \ include/ \ doc/features.md \ doc/tutorial.md \ doc/pointer.md \ doc/stream.md \ doc/encoding.md \ doc/dom.md \ doc/sax.md \ doc/schema.md \ doc/performance.md \ doc/internals.md \ doc/faq.md # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses # libiconv (or the iconv built into libc) for the transcoding. See the libiconv # documentation (see: http://www.gnu.org/software/libiconv) for the list of # possible encodings. # The default value is: UTF-8. 
INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank the # following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, # *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, # *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, # *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, # *.qsf, *.as and *.js. FILE_PATTERNS = *.c \ *.cc \ *.cxx \ *.cpp \ *.h \ *.hh \ *.hxx \ *.hpp \ *.inc \ *.md # The RECURSIVE tag can be used to specify whether or not subdirectories should # be searched for input files as well. # The default value is: NO. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should be # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. # # Note that relative paths are relative to the directory from which doxygen is # run. EXCLUDE = ./include/rapidjson/msinttypes/ # The EXCLUDE_SYMLINKS tag can be used to select whether or not files or # directories that are symbolic links (a Unix file system feature) are excluded # from the input. # The default value is: NO. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. 
The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test # # Note that the wildcards are matched against the file with absolute path, so to # exclude all test directories use the pattern */test/* EXCLUDE_SYMBOLS = internal # The EXAMPLE_PATH tag can be used to specify one or more files or directories # that contain example code fragments that are included (see the \include # command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and # *.h) to filter out the source-files in the directories. If left blank all # files are included. EXAMPLE_PATTERNS = * # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude commands # irrespective of the value of the RECURSIVE tag. # The default value is: NO. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or directories # that contain images that are to be included in the documentation (see the # \image command). IMAGE_PATH = ./doc # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command: # # <filter> <input-file> # # where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the # name of an input file. Doxygen will then use the output that the filter # program writes to standard output. If FILTER_PATTERNS is specified, this tag # will be ignored. # # Note that the filter must not add or remove lines; it is applied before the # code is scanned, but not when the output code is generated. If lines are added # or removed, the anchors will not be placed correctly. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. 
Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: pattern=filter # (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER ) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. FILTER_SOURCE_FILES = NO # The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file # pattern. A pattern will override the setting for FILTER_PATTERN (if any) and # it is also possible to disable source filtering for a specific pattern using # *.ext= (so without naming a filter). # This tag requires that the tag FILTER_SOURCE_FILES is set to YES. FILTER_SOURCE_PATTERNS = # If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that # is part of the input, its contents will be placed on the main page # (index.html). This can be useful if you have a project on for instance GitHub # and want to reuse the introduction page also for the doxygen output. USE_MDFILE_AS_MAINPAGE = readme.md #--------------------------------------------------------------------------- # Configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will be # generated. Documented entities will be cross-referenced with these sources. # # Note: To get rid of all source code in the generated output, make sure that # also VERBATIM_HEADERS is set to NO. # The default value is: NO. 
SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body of functions, # classes and enums directly into the documentation. # The default value is: NO. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any # special comment blocks from generated source code fragments. Normal C, C++ and # Fortran comments will always remain visible. # The default value is: YES. STRIP_CODE_COMMENTS = NO # If the REFERENCED_BY_RELATION tag is set to YES then for each documented # function all documented functions referencing it will be listed. # The default value is: NO. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES then for each documented function # all documented entities called/used by that function will be listed. # The default value is: NO. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set # to YES, then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. REFERENCES_LINK_SOURCE = YES # If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the # source code will show a tooltip with additional information such as prototype, # brief description and links to the definition and documentation. Since this # will make the HTML file larger and loading of large files a bit slower, you # can opt to disable this feature. # The default value is: YES. # This tag requires that the tag SOURCE_BROWSER is set to YES. SOURCE_TOOLTIPS = YES # If the USE_HTAGS tag is set to YES then the references to source code will # point to the HTML generated by the htags(1) tool instead of doxygen built-in # source browser. The htags tool is part of GNU's global source tagging system # (see http://www.gnu.org/software/global/global.html). You will need version # 4.8.6 or higher. 
# # To use it do the following: # - Install the latest version of global # - Enable SOURCE_BROWSER and USE_HTAGS in the config file # - Make sure the INPUT points to the root of the source tree # - Run doxygen as normal # # Doxygen will invoke htags (and that will in turn invoke gtags), so these # tools must be available from the command line (i.e. in the search path). # # The result: instead of the source browser generated by doxygen, the links to # source code will now point to the output of htags. # The default value is: NO. # This tag requires that the tag SOURCE_BROWSER is set to YES. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES then doxygen will generate a # verbatim copy of the header file for each class for which an include is # specified. Set to NO to disable this. # See also: Section \class. # The default value is: YES. VERBATIM_HEADERS = YES # If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the # clang parser (see: http://clang.llvm.org/) for more accurate parsing at the # cost of reduced performance. This can be particularly helpful with template # rich C++ code for which doxygen's built-in parser lacks the necessary type # information. # Note: The availability of this option depends on whether or not doxygen was # compiled with the --with-libclang option. # The default value is: NO. CLANG_ASSISTED_PARSING = NO # If clang assisted parsing is enabled you can provide the compiler with command # line options that you would normally use when invoking the compiler. Note that # the include paths will already be set by doxygen for the files and directories # specified with INPUT and INCLUDE_PATH. # This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES. 
CLANG_OPTIONS = #--------------------------------------------------------------------------- # Configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all # compounds will be generated. Enable this if the project contains a lot of # classes, structs, unions or interfaces. # The default value is: YES. ALPHABETICAL_INDEX = NO # The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in # which the alphabetical index list will be split. # Minimum value: 1, maximum value: 20, default value: 5. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all classes will # be put under the same header in the alphabetical index. The IGNORE_PREFIX tag # can be used to specify a prefix (or a list of prefixes) that should be ignored # while generating the index headers. # This tag requires that the tag ALPHABETICAL_INDEX is set to YES. IGNORE_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of # it. # The default directory is: html. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for each # generated HTML page (for example: .htm, .php, .asp). # The default value is: .html. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a user-defined HTML header file for # each generated HTML page. If the tag is left blank doxygen will generate a # standard header. # # To get valid HTML the header file that includes any scripts and style sheets # that doxygen needs, which is dependent on the configuration options used (e.g. # the setting GENERATE_TREEVIEW). It is highly recommended to start with a # default header using # doxygen -w html new_header.html new_footer.html new_stylesheet.css # YourConfigFile # and then modify the file new_header.html. See also section "Doxygen usage" # for information on how to generate the default header that doxygen normally # uses. # Note: The header is subject to change so you typically have to regenerate the # default header when upgrading to a newer version of doxygen. For a description # of the possible markers and block names see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_HEADER = ./doc/misc/header.html # The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each # generated HTML page. If the tag is left blank doxygen will generate a standard # footer. See HTML_HEADER for more information on how to generate a default # footer and what special commands can be used inside the footer. See also # section "Doxygen usage" for information on how to generate the default footer # that doxygen normally uses. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_FOOTER = ./doc/misc/footer.html # The HTML_STYLESHEET tag can be used to specify a user-defined cascading style # sheet that is used by each HTML page. It can be used to fine-tune the look of # the HTML output. If left blank doxygen will generate a default style sheet. # See also section "Doxygen usage" for information on how to generate the style # sheet that doxygen normally uses. 
# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as # it is more robust and this tag (HTML_STYLESHEET) will in the future become # obsolete. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_STYLESHEET = # The HTML_EXTRA_STYLESHEET tag can be used to specify an additional user- # defined cascading style sheet that is included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the # standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet file to the output directory. For an example # see the documentation. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_STYLESHEET = ./doc/misc/doxygenextra.css # The HTML_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the HTML output directory. Note # that these files will be copied to the base HTML output directory. Use the # $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these # files. In the HTML_STYLESHEET file, use the file name only. Also note that the # files will be copied as-is; there are no commands or markers available. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen # will adjust the colors in the stylesheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # http://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 # purple, and 360 is red again. # Minimum value: 0, maximum value: 359, default value: 220. # This tag requires that the tag GENERATE_HTML is set to YES. 
HTML_COLORSTYLE_HUE = 220 # The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors # in the HTML output. For a value of 0 the output will use grayscales only. A # value of 255 will produce the most vivid colors. # Minimum value: 0, maximum value: 255, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_SAT = 100 # The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the # luminance component of the colors in the HTML output. Values below 100 # gradually make the output lighter, whereas values above 100 make the output # darker. The value divided by 100 is the actual gamma applied, so 80 represents # a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not # change the gamma. # Minimum value: 40, maximum value: 240, default value: 80. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_COLORSTYLE_GAMMA = 80 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this # to NO can help when comparing the output of multiple runs. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_DYNAMIC_SECTIONS = NO # With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries # shown in the various tree structured indices initially; the user can expand # and collapse entries dynamically later on. Doxygen will expand the tree to # such a level that at most the specified number of entries are visible (unless # a fully collapsed tree already exceeds this amount). 
So setting the number of # entries 1 will produce a full collapsed tree by default. 0 is a special value # representing an infinite number of entries and will result in a full expanded # tree by default. # Minimum value: 0, maximum value: 9999, default value: 100. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_INDEX_NUM_ENTRIES = 100 # If the GENERATE_DOCSET tag is set to YES, additional index files will be # generated that can be used as input for Apple's Xcode 3 integrated development # environment (see: http://developer.apple.com/tools/xcode/), introduced with # OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a # Makefile in the HTML output directory. Running make will produce the docset in # that directory and running make install will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at # startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html # for more information. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_DOCSET = NO # This tag determines the name of the docset feed. A documentation feed provides # an umbrella under which multiple documentation sets from a single provider # (such as a company or product suite) can be grouped. # The default value is: Doxygen generated docs. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_FEEDNAME = "Doxygen generated docs" # This tag specifies a string that should uniquely identify the documentation # set bundle. This should be a reverse domain-name style string, e.g. # com.mycompany.MyDocSet. Doxygen will append .docset to the name. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_BUNDLE_ID = org.doxygen.Project # The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify # the documentation publisher. 
This should be a reverse domain-name style # string, e.g. com.mycompany.MyDocSet.documentation. # The default value is: org.doxygen.Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_ID = org.doxygen.Publisher # The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher. # The default value is: Publisher. # This tag requires that the tag GENERATE_DOCSET is set to YES. DOCSET_PUBLISHER_NAME = Publisher # If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three # additional HTML index files: index.hhp, index.hhc, and index.hhk. The # index.hhp is a project file that can be read by Microsoft's HTML Help Workshop # (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on # Windows. # # The HTML Help Workshop contains a compiler that can convert all HTML output # generated by doxygen into a single compiled HTML file (.chm). Compiled HTML # files are now used as the Windows 98 help format, and will replace the old # Windows help format (.hlp) on all Windows platforms in the future. Compressed # HTML files also contain an index, a table of contents, and you can search for # words in the documentation. The HTML workshop also contains a viewer for # compressed HTML files. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_HTMLHELP = NO # The CHM_FILE tag can be used to specify the file name of the resulting .chm # file. You can add a path in front of the file if the result should not be # written to the html output directory. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path # including file name) of the HTML help compiler ( hhc.exe). If non-empty # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. 
HHC_LOCATION = # The GENERATE_CHI flag controls if a separate .chi index file is generated ( # YES) or that it should be included in the master .chm file ( NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO # The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = # The BINARY_TOC flag controls whether a binary table of contents is generated ( # YES) or a normal table of contents ( NO) in the .chm file. Furthermore it # enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members to # the table of contents of the HTML help documentation and to the tree view. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and # QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that # can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help # (.qch) of the generated HTML documentation. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify # the file name of the resulting .qch file. The path specified is relative to # the HTML output folder. # This tag requires that the tag GENERATE_QHP is set to YES. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help # Project output. For more information please see Qt Help Project / Namespace # (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace). # The default value is: org.doxygen.Project. 
# This tag requires that the tag GENERATE_QHP is set to YES. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt # Help Project output. For more information please see Qt Help Project / Virtual # Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual- # folders). # The default value is: doc. # This tag requires that the tag GENERATE_QHP is set to YES. QHP_VIRTUAL_FOLDER = doc # If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom # filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_NAME = # The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the # custom filter to add. For more information please see Qt Help Project / Custom # Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom- # filters). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this # project's filter section matches. Qt Help Project / Filter Attributes (see: # http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes). # This tag requires that the tag GENERATE_QHP is set to YES. QHP_SECT_FILTER_ATTRS = # The QHG_LOCATION tag can be used to specify the location of Qt's # qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the # generated .qhp file. # This tag requires that the tag GENERATE_QHP is set to YES. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be # generated, together with the HTML files, they form an Eclipse help plugin. 
To # install this plugin and make it available under the help contents menu in # Eclipse, the contents of the directory containing the HTML and XML files needs # to be copied into the plugins directory of eclipse. The name of the directory # within the plugins directory should be the same as the ECLIPSE_DOC_ID value. # After copying Eclipse needs to be restarted before the help appears. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_ECLIPSEHELP = NO # A unique identifier for the Eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have this # name. Each documentation set should have its own identifier. # The default value is: org.doxygen.Project. # This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES. ECLIPSE_DOC_ID = org.doxygen.Project # If you want full control over the layout of the generated HTML pages it might # be necessary to disable the index and replace it with your own. The # DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top # of each HTML page. A value of NO enables the index and the value YES disables # it. Since the tabs in the index contain the same information as the navigation # tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. DISABLE_INDEX = YES # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. If the tag # value is set to YES, a side panel will be generated containing a tree-like # index structure (just like the one that is generated for HTML Help). For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the # HTML help feature. 
Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. Since the tree basically has # the same information as the tab index, you could consider setting # DISABLE_INDEX to YES when enabling this option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. GENERATE_TREEVIEW = YES # The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that # doxygen will group on one line in the generated HTML documentation. # # Note that a value of 0 will completely suppress the enum values from appearing # in the overview section. # Minimum value: 0, maximum value: 20, default value: 4. # This tag requires that the tag GENERATE_HTML is set to YES. ENUM_VALUES_PER_LINE = 4 # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used # to set the initial width (in pixels) of the frame in which the tree is shown. # Minimum value: 0, maximum value: 1500, default value: 250. # This tag requires that the tag GENERATE_HTML is set to YES. TREEVIEW_WIDTH = 250 # When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. EXT_LINKS_IN_WINDOW = NO # Use this tag to change the font size of LaTeX formulas included as images in # the HTML documentation. When you change the font size after a successful # doxygen run you need to manually remove any form_*.png images from the HTML # output directory to force them to be regenerated. # Minimum value: 8, maximum value: 50, default value: 10. # This tag requires that the tag GENERATE_HTML is set to YES. 
FORMULA_FONTSIZE = 10 # Use the FORMULA_TRANSPARENT tag to determine whether or not the images # generated for formulas are transparent PNGs. Transparent PNGs are not # supported properly for IE 6.0, but are supported on all modern browsers. # # Note that when changing this option you need to delete any form_*.png files in # the HTML output directory before the changes have effect. # The default value is: YES. # This tag requires that the tag GENERATE_HTML is set to YES. FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # http://www.mathjax.org) which uses client side Javascript for the rendering # instead of using prerendered bitmaps. Use this if you do not have LaTeX # installed or if you want the formulas to look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. USE_MATHJAX = NO # When MathJax is enabled you can set the default output format to be used for # the MathJax output. See the MathJax site (see: # http://docs.mathjax.org/en/latest/output.html) for more details. # Possible values are: HTML-CSS (which is slower, but has the best # compatibility), NativeMML (i.e. MathML) and SVG. # The default value is: HTML-CSS. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_FORMAT = HTML-CSS # When MathJax is enabled you need to specify the location relative to the HTML # output directory using the MATHJAX_RELPATH option. The destination directory # should contain the MathJax.js script. For instance, if the mathjax directory # is located at the same level as the HTML output directory, then # MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax # Content Delivery Network so you can quickly see the result without installing # MathJax. 
However, it is strongly recommended to install a local copy of # MathJax from http://www.mathjax.org before deployment. # The default value is: http://cdn.mathjax.org/mathjax/latest. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_RELPATH = http://www.mathjax.org/mathjax # The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax # extension names that should be enabled during MathJax rendering. For example # MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_EXTENSIONS = # The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces # of code that will be used on startup of the MathJax code. See the MathJax site # (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an # example see the documentation. # This tag requires that the tag USE_MATHJAX is set to YES. MATHJAX_CODEFILE = # When the SEARCHENGINE tag is enabled doxygen will generate a search box for # the HTML output. The underlying search engine uses javascript and DHTML and # should work on any modern browser. Note that when using HTML help # (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) # there is already a search function so this one should typically be disabled. # For large projects the javascript based search engine can be slow, then # enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to # search using the keyboard; to jump to the search box use <access key> + S # (what the <access key> is depends on the OS and browser, but it is typically # <CTRL>, <ALT>/